Core: serious bug in "update predictions"
This is related to #223 (closed) and #224 (closed).
The algorithm to update predicted peaks post-refinement is as follows:
int Refiner::updatePredictions(std::vector<Peak3D*>& peaks) const
{
nsxlog(Level::Info, "Refiner::updatePredictions");
const PeakFilter peak_filter;
std::vector<nsx::Peak3D*> filtered_peaks = peaks;
filtered_peaks = peak_filter.filterEnabled(peaks, true);
if (_first_refine)
filtered_peaks = peak_filter.filterIndexed(filtered_peaks, _cell.get());
else
filtered_peaks = peak_filter.filterIndexed(filtered_peaks);
int updated = 0;
for (nsx::Peak3D* peak : filtered_peaks) {
// find appropriate batch
const RefinementBatch* b = nullptr;
const double z = peak->shape().center()[2];
for (const auto& batch : _batches) {
if (batch.onlyContains(z)) {
b = &batch;
break;
}
}
// no appropriate batch
if (b == nullptr)
continue;
const auto* batch_cell = b->cell();
// update the position
const MillerIndex hkl(peak->q(), *batch_cell);
const ReciprocalVector q_pred(
hkl.rowVector().cast<double>() * batch_cell->reciprocalBasis());
const std::vector<DetectorEvent> events = algo::qVectorList2Events(
{q_pred}, peak->dataSet()->instrumentStates(), peak->dataSet()->detector(), _nframes);
// something wrong with new prediction...
if (events.size() != 1) {
peak->setSelected(false);
peak->setRejectionFlag(RejectionFlag::PredictionUpdateFailure);
continue;
}
peak->setShape(Ellipsoid(
{events[0].px, events[0].py, events[0].frame}, peak->shape().metric()));
++updated;
}
nsxlog(Level::Info, updated, " peaks updated");
return updated;
}
In particular, the following section:
const std::vector<DetectorEvent> events = algo::qVectorList2Events(
{q_pred}, peak->dataSet()->instrumentStates(), peak->dataSet()->detector(), _nframes);
// something wrong with new prediction...
if (events.size() != 1) {
peak->setSelected(false);
peak->setRejectionFlag(RejectionFlag::PredictionUpdateFailure);
continue;
}
This uses the same incorrect assumption as the old prediction algorithm, namely that there is a one-to-one mapping between peaks and detector events. In fact, many peaks cross the Ewald sphere more than once, and by simply rejecting any peak for which exactly one event is not found, we are discarding a lot of good peaks.