From b8cee496fd3885d8dcbb98e3eb53c5a133a439cd Mon Sep 17 00:00:00 2001
From: Olli-Antti Kivilahti
Date: Tue, 9 Dec 2014 16:48:47 +0200
Subject: [PATCH] Bug 4283 - Merge bibliographic records. Followup.

---
 Koha/Deduplicator.pm             | 56 ++++++++++++++++++++-------
 misc/maintenance/deduplicator.pl | 77 +++++++++++++++++++++++++++-----------
 2 files changed, 99 insertions(+), 34 deletions(-)

diff --git a/Koha/Deduplicator.pm b/Koha/Deduplicator.pm
index ee58b41..47e30fd 100644
--- a/Koha/Deduplicator.pm
+++ b/Koha/Deduplicator.pm
@@ -89,31 +89,39 @@ sub deduplicate {
     my $verbose = $self->{verbose};
     my $biblionumbers = C4::Biblio::GetBiblionumberSlice( $self->{limit}, $self->{offset}, $self->{biblionumber} );
 
-    my @duplicates;
+    $self->{duplicates} = [];
     foreach my $biblionumber (@$biblionumbers) {
         my $marc = C4::Biblio::GetMarcBiblio($biblionumber);
         my @matches = $self->{matcher}->get_matches( $marc, $self->{max_matches} );
 
         if (scalar(@matches) > 1) {
-            foreach my $match (@matches) {
+            for (my $i=0 ; $i<scalar(@matches) ; $i++) {
+                my $match = $matches[$i];
                 my $itemsCount = C4::Items::GetItemsCount($match->{record_id});
                 $match->{itemsCount} = $itemsCount;
 
-                _buildSlimBiblio($match->{record_id}, $match, C4::Biblio::GetMarcBiblio($match->{record_id}));
+                unless( _buildSlimBiblio($match->{record_id}, $match, C4::Biblio::GetMarcBiblio($match->{record_id})) ) {
+                    #Sometimes we get an error where the marcxml is not available.
+                    splice(@matches, $i, 1);
+                    $i--; #Don't advance the iterator after this round or we will skip one record!
+                    next();
+                }
                 if ($match->{record_id} == $biblionumber) {
                     $match->{matchSource} = 'matchSource';
                 }
-
             }
             my $biblio = _buildSlimBiblio($biblionumber, undef, $marc);
+            unless ($biblio) { #Sometimes we get an error where the marcxml is not available.
+                next();
+            }
             $biblio->{matches} = \@matches;
 
-            push @duplicates, $biblio;
+            push @{$self->{duplicates}}, $biblio;
         }
 
         if ($verbose) {
            print $biblionumber."\n";
        }
    }
-    return \@duplicates;
+    return $self->{duplicates};
 }
@@ -127,7 +135,7 @@ sub _buildSlimBiblio {
     }
     if (not($marc)) {
         warn "C4::Deduplicator::_buildSlimBiblio(), No MARC::Record for bn:$biblionumber";
-        return $biblio;
+        return undef;
     }
 
     $biblio->{marc} = $marc;
@@ -153,8 +161,9 @@
     my $author = $marc->subfield('100','a');
     $author = $marc->subfield('110','a') unless $author;
 
-    $biblio->{author} = $author;
+    $biblio->{author} = ($author) ? $author : '';
     $biblio->{title} = join(' ', @titles);
+    $biblio->{title} = '' unless $biblio->{title};
 
     return $biblio;
 }
@@ -167,18 +176,18 @@ sub _buildSlimBiblio {
 
 sub batchMergeDuplicates {
     my ($self, $duplicates, $mergeTargetFindingAlgorithm) = @_;
 
-    my @errors;
-    _findMergeTargets($duplicates, $mergeTargetFindingAlgorithm, \@errors);
+    $self->{mergeErrors} = [];
+    _findMergeTargets($duplicates, $mergeTargetFindingAlgorithm, $self->{mergeErrors});
 
     foreach my $duplicate (@$duplicates) {
         foreach my $match (@{$duplicate->{matches}}) {
            if ($match eq $duplicate->{'mergeTarget'}) { #Comparing Perl references, to see if they point to the same object.
                next(); #Don't merge a record with itself.
            }
-            merge($match, $duplicate->{'mergeTarget'}, \@errors);
+            merge($match, $duplicate->{'mergeTarget'}, $self->{mergeErrors});
         }
     }
-    return \@errors if scalar @errors > 0;
+    return $self->{mergeErrors} if scalar @{$self->{mergeErrors}} > 0;
     return undef;
 }
@@ -188,6 +197,9 @@ sub _findMergeTargets {
     if ($mergeTargetFindingAlgorithm eq 'newest') {
         _mergeTargetFindingAlgorithm_newest( $duplicates );
     }
+    else {
+        warn "Unknown merge target finding algorithm given: '$mergeTargetFindingAlgorithm'";
+    }
 }
 
 sub _mergeTargetFindingAlgorithm_newest {
@@ -224,7 +236,6 @@ sub merge {
     my $dbh = C4::Context->dbh;
     my $sth;
 
-    # Creating a new record from the html code
     my $tobiblio = $mergeTarget->{biblionumber};
     my $frombiblio = $match->{biblionumber};
     if ($tobiblio == $frombiblio) {
@@ -286,4 +297,23 @@ sub merge {
         push @$errors, $error if ($error);
     }
 }
+
+sub printDuplicatesAsText {
+    my ($self) = @_;
+
+    foreach my $duplicate (@{$self->{duplicates}}) {
+        print 'Match source: '.$duplicate->{biblionumber}.' - '.$duplicate->{title}.' '.$duplicate->{author}."\n";
+        foreach my $match (@{$duplicate->{matches}}) {
+            print $match->{record_id}.' - '.$match->{score}.' '.$match->{itemsCount}.' '.$match->{title}.' '.$match->{author}."\n";
+        }
+        print "\n\n";
+    }
+}
+
+sub printMergesAsText {
+    my ($self) = @_;
+    foreach my $error (@{$self->{mergeErrors}}) {
+        print $error;
+    }
+}
 1;
\ No newline at end of file
diff --git a/misc/maintenance/deduplicator.pl b/misc/maintenance/deduplicator.pl
index cda1804..ef9e4f0 100755
--- a/misc/maintenance/deduplicator.pl
+++ b/misc/maintenance/deduplicator.pl
@@ -26,13 +26,16 @@
 use Koha::Deduplicator;
 use Getopt::Long qw(:config no_ignore_case);
 
-my ($help, $verbose, $offset, $biblionumber, $matcher_id, $merge);
+my ($help, $verbose, $biblionumber, $matcher_id, $merge);
 my $limit = 500;
+my $chunk = 500;
+my $offset = 0;
 
 GetOptions(
     'h|help'           => \$help,
     'v|verbose'        => \$verbose,
     'l|limit:i'        => \$limit,
+    'c|chunk:i'        => \$chunk,
     'o|offset:i'       => \$offset,
     'b|biblionumber:i' => \$biblionumber,
     'm|matcher:i'      => \$matcher_id,
@@ -52,9 +55,16 @@ This script has the following parameters :
     -l --limit How many biblios to check for duplicates. Is the SQL LIMIT-clause for
                gathering biblios to deduplicate.
+               Defaults to 500. To run through the whole DB, set a sufficiently
+               large number, like 999999999999999 :)
+
+    -c --chunk How many records to process on one deduplicate->merge run.
+               Defaults to 500. Use this to prevent memory from running
+               out when deduplicating/merging large databases.
 
     -o --offset How many records to skip from the start. Is the SQL OFFSET-clause for
                gathering biblios to deduplicate.
+               Defaults to 0.
 
     -b --biblionumber From which biblionumber (inclusive) to start gathering the biblios
                to deduplicate. Obsoletes --offset
 
@@ -94,30 +104,55 @@ elsif ($merge) {
     exit;
 }
 
-my ($deduplicator, $initErrors) = Koha::Deduplicator->new( $matcher_id, $limit, $offset, $biblionumber, $verbose );
-if ($initErrors) {
-    print "Errors happened when creating the Deduplicator:\n";
-    print join("\n", @$initErrors);
-    print "\n";
-    print $usage;
-    exit;
+my $lastOffset = $offset;
+while ($lastOffset < $limit) {
+
+    my $chunkSize = _calculateChunkSize($lastOffset, $chunk, $limit);
+
+    runDeduplicateMergeChunk($matcher_id, $chunkSize, $lastOffset, $biblionumber, $verbose );
+
+    $lastOffset += $chunk;
 }
-else {
-    my $duplicates = $deduplicator->deduplicate();
-    foreach my $duplicate (@$duplicates) {
-        print 'Match source: '.$duplicate->{biblionumber}.' - '.$duplicate->{title}.' '.$duplicate->{author}."\n";
-        foreach my $match (@{$duplicate->{matches}}) {
-            print $match->{record_id}.' - '.$match->{score}.' '.$match->{itemsCount}.' '.$match->{title}.' '.$match->{author}."\n";
-        }
-        print "\n\n";
+=head2 _calculateChunkSize
+
+    my $chunkSize = _calculateChunkSize($lastOffset, $chunk, $limit);
+
+It can be that the last chunk overflows the limit-parameter, thus leading to deduplicating/merging too many biblios.
+We don't want that, so calculate the remaining chunk size to not exceed the given limit!
+=cut
+sub _calculateChunkSize {
+    my ($lastOffset, $chunk, $limit) = @_;
+
+    my $chunkSize = $chunk;
+    if ($lastOffset + $chunk > $limit) {
+        $chunkSize = $limit - $lastOffset;
     }
+    return $chunkSize;
+}
+
+sub runDeduplicateMergeChunk {
+    my ($matcher_id, $chunkSize, $offset, $biblionumber, $verbose ) = @_;
+
+    my ($deduplicator, $initErrors) = Koha::Deduplicator->new( $matcher_id, $chunkSize, $offset, $biblionumber, $verbose );
+    if ($initErrors) {
+        print "Errors happened when creating the Deduplicator:\n";
+        print join("\n", @$initErrors);
+        print "\n";
+        print $usage;
+        exit;
+    }
+    else {
+        my $duplicates = $deduplicator->deduplicate();
+
+        $deduplicator->printDuplicatesAsText();
 
-    if ($merge && $duplicates) {
-        my $errors = $deduplicator->batchMergeDuplicates($duplicates, $merge);
-        if ($errors) {
-            foreach my $error (@$errors) {
-                print $error;
+        if ($merge && scalar(@$duplicates) > 0) {
+            my $errors = $deduplicator->batchMergeDuplicates($duplicates, $merge);
+            if ($errors) {
+                foreach my $error (@$errors) {
+                    print $error;
+                }
             }
         }
     }
 }
-- 
1.7.9.5
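
Reviewer note (not part of the patch): a quick way to exercise the chunking added above. The -m/-l/-c/-o/-v spellings come from the GetOptions block in this patch; the --merge option is declared outside the visible hunks, so its exact spelling here is an assumption, and 'newest' is the only merge-target-finding algorithm _findMergeTargets() currently recognizes.

    # Hypothetical smoke test: scan the first 1200 biblios with matcher 1,
    # in chunks of 500, merging each duplicate group onto the newest record.
    perl misc/maintenance/deduplicator.pl -m 1 -l 1200 -c 500 -o 0 --merge newest -v

With these values _calculateChunkSize() clamps the final run: the chunks starting at offsets 0 and 500 each process 500 biblios, but at offset 1000 a full chunk would overflow the limit (1000 + 500 > 1200), so the last chunk is reduced to 1200 - 1000 = 200.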