
(-)a/Koha/BackgroundJob/ImportKBARTFile.pm (-151 / +15 lines)
@@ -16,10 +16,7 @@ package Koha::BackgroundJob::ImportKBARTFile;
 # along with Koha; if not, see <http://www.gnu.org/licenses>.
 
 use Modern::Perl;
-use JSON         qw( decode_json encode_json );
 use Try::Tiny    qw( catch try );
-use MIME::Base64 qw( decode_base64 );
-use POSIX        qw( floor );
 
 use C4::Context;
 
@@ -98,8 +95,8 @@ sub process {
 
         foreach my $row ( @{$rows} ) {
             next if !$row;
-            my $new_title   = create_title_hash_from_line_data( $row, $column_headers, $invalid_columns );
-            my $title_match = check_for_matching_title( $new_title, $package_id );
+            my $new_title   = _create_title_hash_from_line_data( $row, $column_headers, $invalid_columns );
+            my $title_match = _check_for_matching_title( $new_title, $package_id );
 
             if ($title_match) {
                 $duplicate_titles++;
@@ -111,7 +108,7 @@ sub process {
                 };
             } else {
                 try {
-                    my $formatted_title = format_title($new_title);
+                    my $formatted_title = _format_title($new_title);
                     if ( !$formatted_title->{publication_title} ) {
                         push @messages, {
                             code          => 'no_title_found',
@@ -124,7 +121,7 @@ sub process {
                         my $imported_title = Koha::ERM::EHoldings::Title->new($formatted_title)
                             ->store( { create_linked_biblio => $create_linked_biblio } );
                         push( @biblio_ids, $imported_title->biblio_id ) if $create_linked_biblio;
-                        create_linked_resource(
+                        _create_linked_resource(
                             {
                                 title      => $imported_title,
                                 package_id => $package_id
@@ -192,7 +189,7 @@ sub enqueue {
     );
 }
 
-=head3 format_title
+=head3 _format_title
 
 Formats a title to fit the names of the database fields in Koha
 
@@ -201,7 +198,7 @@ Kbart field "coverage_notes" = "notes" in Koha
 
 =cut
 
-sub format_title {
+sub _format_title {
     my ($title) = @_;
 
     $title->{external_id} = $title->{title_id};
@@ -216,44 +213,7 @@ sub format_title {
     return $title;
 }
 
-=head3 read_file
-
-Reads a file to provide report headers and lines to be processed
-
-=cut
-
-sub read_file {
-    my ($file) = @_;
-
-    my $file_content = defined( $file->{file_content} ) ? decode_base64( $file->{file_content} ) : "";
-    my $delimiter    = $file->{filename} =~ /\.tsv$/    ? "\t"                                   : ",";
-    my $quote_char   = $file->{filename} =~ /\.tsv$/    ? "\""                                   : "\"";
-
-    open my $fh, "<", \$file_content or die "Could not open file $file->{filename}: $!";
-    my $csv = Text::CSV_XS->new(
-        {
-            sep_char           => $delimiter,
-            quote_char         => $quote_char,
-            binary             => 1,
-            allow_loose_quotes => 1
-        }
-    );
-    my $headers_to_check = $csv->getline($fh);
-    my $column_headers   = rescue_EBSCO_files($headers_to_check);
-    my $lines            = $csv->getline_all( $fh, 0 );
-    close($fh);
-
-    unless($csv->eof()) {
-        my ( $cde, $str, $pos ) = $csv->error_diag();
-        my $error = $cde ? "$cde, $str, $pos" : "";
-        warn $error if $error;
-        return ( $column_headers, $lines, $error );
-    }
-
-    return ( $column_headers, $lines );
-}
-
-=head3 create_title_hash_from_line_data
+=head3 _create_title_hash_from_line_data
 
 Takes a line and creates a hash of the values mapped to the column headings
 Only accepts fields that are in the list of permitted KBART fields, other fields are ignored
@@ -261,7 +221,7 @@ Only accepts fields that are in the list of permitted KBART fields, other fields
 
 =cut
 
-sub create_title_hash_from_line_data {
+sub _create_title_hash_from_line_data {
     my ( $row, $column_headers, $invalid_columns ) = @_;
 
     my %new_title;
@@ -283,13 +243,13 @@ sub create_title_hash_from_line_data {
     return \%new_title;
 }
 
-=head3 check_for_matching_title
+=head3 _check_for_matching_title
 
 Checks whether this title already exists to avoid duplicates
 
 =cut
 
-sub check_for_matching_title {
+sub _check_for_matching_title {
     my ( $title, $package_id ) = @_;
 
     my $match_parameters = {};
@@ -316,20 +276,20 @@ sub check_for_matching_title {
     return $matching_title_found;
 }
 
-=head3 create_linked_resource
+=head3 _create_linked_resource
 
 Creates a resource for a newly stored title.
 
 =cut
 
-sub create_linked_resource {
+sub _create_linked_resource {
     my ($args) = @_;
 
     my $title      = $args->{title};
     my $package_id = $args->{package_id};
 
     my $title_id = $title->title_id;
-    my ( $date_first_issue_online, $date_last_issue_online ) = get_first_and_last_issue_dates($title);
+    my ( $date_first_issue_online, $date_last_issue_online ) = _get_first_and_last_issue_dates($title);
     my $resource = Koha::ERM::EHoldings::Resource->new(
         {
             title_id   => $title_id,
@@ -342,13 +302,13 @@ sub create_linked_resource {
     return;
 }
 
-=head3 get_first_and_last_issue_dates
+=head3 _get_first_and_last_issue_dates
 
 Gets and formats a date for storing on the resource. Dates can come from files in YYYY, YYYY-MM or YYYY-MM-DD format
 
 =cut
 
-sub get_first_and_last_issue_dates {
+sub _get_first_and_last_issue_dates {
     my ($title) = @_;
 
     return ( undef, undef ) if ( !$title->date_first_issue_online && !$title->date_last_issue_online );
@@ -372,100 +332,4 @@ sub get_first_and_last_issue_dates {
     return ( $date_first_issue_online, $date_last_issue_online );
 }
 
-=head3 get_valid_headers
-
-Returns a list of permitted headers in a KBART phase II file
-
-=cut
-
-sub get_valid_headers {
-    return (
-        'publication_title',
-        'print_identifier',
-        'online_identifier',
-        'date_first_issue_online',
-        'num_first_vol_online',
-        'num_first_issue_online',
-        'date_last_issue_online',
-        'num_last_vol_online',
-        'num_last_issue_online',
-        'title_url',
-        'first_author',
-        'title_id',
-        'embargo_info',
-        'coverage_depth',
-        'coverage_notes',
-        'publisher_name',
-        'publication_type',
-        'date_monograph_published_print',
-        'date_monograph_published_online',
-        'monograph_volume',
-        'monograph_edition',
-        'first_editor',
-        'parent_publication_title_id',
-        'preceding_publication_title_id',
-        'access_type',
-        'notes'
-    );
-}
-
-=head3 calculate_chunked_params_size
-
-Calculates average line size to work out how many lines to chunk a large file into
-Uses only 75% of the max_allowed_packet as an upper limit
-
-=cut
-
-sub calculate_chunked_params_size {
-    my ( $params_size, $max_allowed_packet, $number_of_rows ) = @_;
-
-    my $average_line_size = $params_size / $number_of_rows;
-    my $lines_possible    = ( $max_allowed_packet * 0.75 ) / $average_line_size;
-    my $rounded_value     = floor($lines_possible);
-    return $rounded_value;
-}
-
-=head3 is_file_too_large
-
-Calculates the final size of the background job object that will need storing to check if we exceed the max_allowed_packet
-
-=cut
-
-sub is_file_too_large {
-    my ( $params_to_store, $max_allowed_packet ) = @_;
-
-    my $json           = JSON->new->utf8(0);
-    my $encoded_params = $json->encode($params_to_store);
-    my $params_size    = length $encoded_params;
-
-    # A lot more than just the params are stored in the background job table and this is difficult to calculate
-    # We should allow for no more than 75% of the max_allowed_packet to be made up of the job params to avoid db conflicts
-    return {
-        file_too_large => 1,
-        params_size    => $params_size
-    } if $params_size > ( $max_allowed_packet * 0.75 );
-
-    return {
-        file_too_large => 0,
-        params_size    => $params_size
-    };
-}
-
-=head3 rescue_EBSCO_files
-
-EBSCO have an incorrect spelling for "preceding_publication_title_id" in all of their KBART files (preceding is spelled with a double 'e').
-This means all of their KBART files fail to import using the current methodology.
-There is no simple way of finding out who the vendor is before importing so all KBART files from any vendor are going to have to be checked for this spelling and corrected.
-
-=cut
-
-sub rescue_EBSCO_files {
-    my ($column_headers) = @_;
-
-    my ($index) = grep { @$column_headers[$_] eq 'preceeding_publication_title_id' } ( 0 .. @$column_headers - 1 );
-    @$column_headers[$index] = 'preceding_publication_title_id' if $index;
-
-    return $column_headers;
-}
-
 1;
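
Note for reviewers: a minimal, standalone sketch of how the size maths in calculate_chunked_params_size and is_file_too_large (moved into Title.pm below) behaves. It re-implements the two helpers outside Koha for illustration; the figures in the example are hypothetical, not taken from the patch.

#!/usr/bin/perl
# Standalone sketch -- not part of the patch.
use Modern::Perl;
use POSIX qw( floor );
use JSON;

# Same formula as calculate_chunked_params_size: average bytes per row,
# then how many rows fit in 75% of max_allowed_packet.
sub rows_per_chunk {
    my ( $params_size, $max_allowed_packet, $number_of_rows ) = @_;
    my $average_line_size = $params_size / $number_of_rows;
    return floor( ( $max_allowed_packet * 0.75 ) / $average_line_size );
}

# Same check as is_file_too_large: JSON-encode the job params and compare
# the encoded size against 75% of max_allowed_packet.
sub file_too_large {
    my ( $params_to_store, $max_allowed_packet ) = @_;
    my $params_size = length( JSON->new->utf8(0)->encode($params_to_store) );
    return {
        file_too_large => ( $params_size > $max_allowed_packet * 0.75 ) ? 1 : 0,
        params_size    => $params_size,
    };
}

# Hypothetical numbers: 1,000,000 bytes of params over 10,000 rows against
# MySQL's default 16MB max_allowed_packet (16,777,216 bytes). Average row
# = 100 bytes, budget = 12,582,912 bytes, so floor() gives 125,829 rows.
say rows_per_chunk( 1_000_000, 16_777_216, 10_000 );    # prints 125829
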
(-)a/Koha/ERM/EHoldings/Title.pm (-1 / +145 lines)
@@ -17,6 +17,9 @@ package Koha::ERM::EHoldings::Title;
 
 use Modern::Perl;
 
+use POSIX        qw( floor );
+use MIME::Base64 qw( decode_base64 );
+
 use Koha::Database;
 
 use base qw(Koha::Object);
@@ -100,7 +103,148 @@ sub resources {
     return Koha::ERM::EHoldings::Resources->_new_from_dbic($resources_rs);
 }
 
+
+=head3 read_file
+
+Reads a KBART file of titles to provide report headers and lines to be processed.
+Automatically detects whether the file is TSV or CSV based on the first 5 lines
+
+=cut
+
+sub read_file {
+    my ($file) = @_;
+
+    my $file_content = defined( $file->{file_content} ) ? decode_base64( $file->{file_content} ) : "";
+    my ( $delimiter, $quote_char ) = _detect_delimiter_and_quote($file_content);
+
+    return ( undef, undef, "unknown_delimiter" ) unless $delimiter;
+
+    open my $fh, "<", \$file_content or die "Could not open file $file->{filename}: $!";
+    my $csv = Text::CSV_XS->new(
+        {
+            sep_char           => $delimiter,
+            quote_char         => $quote_char,
+            binary             => 1,
+            allow_loose_quotes => 1
+        }
+    );
+
+    my $headers_to_check = $csv->getline($fh);
+    my $column_headers   = _rescue_EBSCO_files($headers_to_check);
+    my $lines            = $csv->getline_all( $fh, 0 );
+    close($fh);
+
+    unless ( $csv->eof() ) {
+        my ( $cde, $str, $pos ) = $csv->error_diag();
+        my $error = $cde ? "$cde, $str, $pos" : "";
+        warn $error if $error;
+        return ( $column_headers, $lines, $error );
+    }
+
+    return ( $column_headers, $lines );
+}
+
+
+=head3 get_valid_headers
+
+Returns a list of permitted headers in a KBART phase II file
+
+=cut
+
+sub get_valid_headers {
+    return (
+        'publication_title',
+        'print_identifier',
+        'online_identifier',
+        'date_first_issue_online',
+        'num_first_vol_online',
+        'num_first_issue_online',
+        'date_last_issue_online',
+        'num_last_vol_online',
+        'num_last_issue_online',
+        'title_url',
+        'first_author',
+        'title_id',
+        'embargo_info',
+        'coverage_depth',
+        'coverage_notes',
+        'publisher_name',
+        'publication_type',
+        'date_monograph_published_print',
+        'date_monograph_published_online',
+        'monograph_volume',
+        'monograph_edition',
+        'first_editor',
+        'parent_publication_title_id',
+        'preceding_publication_title_id',
+        'access_type',
+        'notes'
+    );
+}
+
+
+=head3 calculate_chunked_params_size
+
+Calculates average line size to work out how many lines to chunk a large file into
+Uses only 75% of the max_allowed_packet as an upper limit
+
+=cut
+
+sub calculate_chunked_params_size {
+    my ( $params_size, $max_allowed_packet, $number_of_rows ) = @_;
+
+    my $average_line_size = $params_size / $number_of_rows;
+    my $lines_possible    = ( $max_allowed_packet * 0.75 ) / $average_line_size;
+    my $rounded_value     = floor($lines_possible);
+    return $rounded_value;
+}
+
+
+=head3 is_file_too_large
+
+Calculates the final size of the background job object that will need storing to check if we exceed the max_allowed_packet
+
+=cut
+
+sub is_file_too_large {
+    my ( $params_to_store, $max_allowed_packet ) = @_;
+
+    my $json           = JSON->new->utf8(0);
+    my $encoded_params = $json->encode($params_to_store);
+    my $params_size    = length $encoded_params;
+
+    # A lot more than just the params are stored in the background job table and this is difficult to calculate
+    # We should allow for no more than 75% of the max_allowed_packet to be made up of the job params to avoid db conflicts
+    return {
+        file_too_large => 1,
+        params_size    => $params_size
+    } if $params_size > ( $max_allowed_packet * 0.75 );
+
+    return {
+        file_too_large => 0,
+        params_size    => $params_size
+    };
+}
+
+
+=head3 _rescue_EBSCO_files
+
+EBSCO have an incorrect spelling for "preceding_publication_title_id" in all of their KBART files (preceding is spelled with a double 'e').
+This means all of their KBART files fail to import using the current methodology.
+There is no simple way of finding out who the vendor is before importing so all KBART files from any vendor are going to have to be checked for this spelling and corrected.
+
+=cut
+
+sub _rescue_EBSCO_files {
+    my ($column_headers) = @_;
+
+    my ($index) = grep { @$column_headers[$_] eq 'preceeding_publication_title_id' } ( 0 .. @$column_headers - 1 );
+    @$column_headers[$index] = 'preceding_publication_title_id' if $index;
+
+    return $column_headers;
+}
+
+
 =head3 _detect_delimiter_and_quote
 
 Identifies the delimiter and the quote character used in the KBART file and returns both.
- 
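
The diff view is truncated at this point, so the body of _detect_delimiter_and_quote is not shown. Based only on its POD here and on read_file's "unknown_delimiter" fallback above, a plausible implementation might look like the sketch below. This is an assumption for illustration, not the code from the patch.

# Illustrative sketch only -- the patch's actual implementation is not
# visible in this truncated view.
sub _detect_delimiter_and_quote {
    my ($file_content) = @_;

    # Sample the first five lines of the decoded file content
    my $sample = join "\n", grep { defined } ( split /\r?\n/, $file_content )[ 0 .. 4 ];

    my $tabs   = () = $sample =~ /\t/g;    # count tab characters in the sample
    my $commas = () = $sample =~ /,/g;     # count commas in the sample

    # Caller maps (undef, undef) to the "unknown_delimiter" error
    return ( undef, undef ) unless $tabs || $commas;

    my $delimiter = $tabs >= $commas ? "\t" : ",";
    return ( $delimiter, '"' );
}
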
