From ae97bbb6ebb416bb4e3c625a0ca08ebcb29a0863 Mon Sep 17 00:00:00 2001
From: Martin Fink
Date: Wed, 30 Oct 2019 20:25:51 +0100
Subject: [PATCH 1/2] Use `slice::iter` instead of `into_iter` to avoid future breakage

`an_array.into_iter()` currently just works because of the auto-ref
feature, which makes the call resolve to `<&[T] as IntoIterator>::into_iter`
and yield references. But in the future, arrays will implement
`IntoIterator` by value, too, so the same call could change meaning. To
avoid that breakage, the call is replaced by `iter()`, which is shorter
and more explicit.
---
 src/learning/naive_bayes.rs            | 6 +++---
 src/learning/toolkit/regularization.rs | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/learning/naive_bayes.rs b/src/learning/naive_bayes.rs
index 8a08af2d..f01d1f02 100644
--- a/src/learning/naive_bayes.rs
+++ b/src/learning/naive_bayes.rs
@@ -186,7 +186,7 @@ impl<T: Distribution> NaiveBayes<T> {
 
     fn find_class(row: &[f64]) -> LearningResult<usize> {
         // Find the `1` entry in the row
-        for (idx, r) in row.into_iter().enumerate() {
+        for (idx, r) in row.iter().enumerate() {
             if *r == 1f64 {
                 return Ok(idx);
             }
@@ -288,7 +288,7 @@ impl Distribution for Gaussian {
         let class_count = class_prior.len();
         let mut log_lik = Vec::with_capacity(class_count);
 
-        for (i, item) in class_prior.into_iter().enumerate() {
+        for (i, item) in class_prior.iter().enumerate() {
             let joint_i = item.ln();
             let n_ij = -0.5 * (self.sigma.select_rows(&[i]) * 2.0 * PI).apply(&|x| x.ln()).sum();
 
@@ -371,7 +371,7 @@ impl Distribution for Bernoulli {
         let mut per_class_row = Vec::with_capacity(class_count);
         let neg_prob_sum = neg_prob.sum_cols();
 
-        for (idx, p) in class_prior.into_iter().enumerate() {
+        for (idx, p) in class_prior.iter().enumerate() {
             per_class_row.push(p.ln() + neg_prob_sum[idx]);
         }
 
diff --git a/src/learning/toolkit/regularization.rs b/src/learning/toolkit/regularization.rs
index 395d36bd..50a43acc 100644
--- a/src/learning/toolkit/regularization.rs
+++ b/src/learning/toolkit/regularization.rs
@@ -117,7 +117,7 @@ mod tests {
         assert!((a - (42f64 / 12f64)) < 1e-18);
 
         let true_grad = vec![-1., -1., -1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
-            .into_iter()
+            .iter()
             .map(|x| x / 12f64)
             .collect::<Vec<_>>();
 
@@ -158,7 +158,7 @@ mod tests {
 
         let l1_true_grad = Matrix::new(3, 4,
                                        vec![-1., -1., -1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
-                                           .into_iter()
+                                           .iter()
                                            .map(|x| x / 12f64)
                                            .collect::<Vec<_>>());
         let l2_true_grad = &input_mat / 12f64;

From d2f7eb4da1548a426c526239c49ee8c6e6f13aa3 Mon Sep 17 00:00:00 2001
From: Martin Fink
Date: Sat, 2 Nov 2019 12:49:36 +0100
Subject: [PATCH 2/2] Change vec::iter back to vec::into_iter in tests

---
 src/learning/toolkit/regularization.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/learning/toolkit/regularization.rs b/src/learning/toolkit/regularization.rs
index 50a43acc..395d36bd 100644
--- a/src/learning/toolkit/regularization.rs
+++ b/src/learning/toolkit/regularization.rs
@@ -117,7 +117,7 @@ mod tests {
         assert!((a - (42f64 / 12f64)) < 1e-18);
 
         let true_grad = vec![-1., -1., -1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
-            .iter()
+            .into_iter()
             .map(|x| x / 12f64)
             .collect::<Vec<_>>();
 
@@ -158,7 +158,7 @@ mod tests {
 
         let l1_true_grad = Matrix::new(3, 4,
                                        vec![-1., -1., -1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
-                                           .iter()
+                                           .into_iter()
                                            .map(|x| x / 12f64)
                                            .collect::<Vec<_>>());
         let l2_true_grad = &input_mat / 12f64;
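
For reference (not part of either patch), a minimal, self-contained Rust sketch of the distinction the first commit message describes; `find_one`, `classes`, and the sample values are invented for illustration and do not appear in this crate:

    // Illustrative only: `find_one` is a hypothetical stand-in for the
    // patched `find_class`, not code from this repository.
    fn find_one(row: &[f64]) -> Option<usize> {
        // On a slice, `.iter()` always yields `&f64`, in every edition.
        row.iter().position(|r| *r == 1f64)
    }

    fn main() {
        let classes = [0.0_f64, 1.0, 0.0];

        // On a borrowed slice, `.into_iter()` auto-refs to the same slice
        // iterator as `.iter()`, so the patch does not change behaviour;
        // it only makes the intent explicit.
        assert_eq!(find_one(&classes), Some(1));

        // Arrays implement `IntoIterator` by value since Rust 1.53, so the
        // fully qualified call consumes the array and yields `f64` directly;
        // this by-value iteration is the future change the first patch
        // guards against.
        let total: f64 = IntoIterator::into_iter(classes).sum();
        assert_eq!(total, 1.0);
    }

The second patch only reverts the test-side changes, where the iterated `vec![...]` is an owned temporary `Vec` rather than an array, so its `into_iter()` is not affected by the upcoming array `IntoIterator` implementation.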