From 440bf3dc448ba043e80cc3a9b2a7f5c3947c0dce Mon Sep 17 00:00:00 2001
From: LTLA
Date: Thu, 24 Oct 2024 23:00:48 +0000
Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20tatami-i?=
 =?UTF-8?q?nc/tatami=5Fstats@d714d7f6b1e95ed7346551222a90fddb2dbacf02=20?=
 =?UTF-8?q?=F0=9F=9A=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 docs/counts_8hpp_source.html    | 278 ++++++++---------
 docs/index.html                 |   4 +-
 docs/ranges_8hpp_source.html    | 507 +++++++++++++++----------------
 docs/sums_8hpp_source.html      | 334 +++++++++++----------
 docs/variances_8hpp_source.html | 516 ++++++++++++++++----------------
 5 files changed, 830 insertions(+), 809 deletions(-)

diff --git a/docs/counts_8hpp_source.html b/docs/counts_8hpp_source.html
index d212e01..be652dd 100644
--- a/docs/counts_8hpp_source.html
+++ b/docs/counts_8hpp_source.html
@@ -178,170 +178,174 @@
111
112 for (Index_ x = 0; x < len; ++x) {
113 auto range = ext->fetch(xbuffer.data(), ibuffer.data());
-
114 for (Index_ j = 0; j < range.number; ++j) {
-
115 auto idx = range.index[j];
-
116 curoutput[idx] += condition(range.value[j]);
-
117 ++(nonzeros[idx]);
-
118 }
-
119 }
-
120
-
121 if (count_zero) {
-
122 for (int d = 0; d < dim; ++d) {
-
123 curoutput[d] += len - nonzeros[d];
-
124 }
-
125 }
-
126 }, otherdim, num_threads);
-
127
-
128 } else {
-
129 tatami::parallelize([&](int thread, Index_ start, Index_ len) -> void {
-
130 std::vector<Value_> xbuffer(dim);
-
131 auto ext = tatami::consecutive_extractor<false>(p, !row, start, len);
-
132 auto curoutput = threaded_output_ptrs[thread];
-
133
-
134 for (Index_ x = 0; x < len; ++x) {
-
135 auto ptr = ext->fetch(xbuffer.data());
-
136 for (Index_ j = 0; j < dim; ++j) {
-
137 curoutput[j] += condition(ptr[j]);
-
138 }
-
139 }
-
140 }, otherdim, num_threads);
-
141 }
-
142
-
143 for (int t = 1; t < num_threads; ++t) {
-
144 auto curoutput = threaded_output_ptrs[t];
-
145 for (Index_ d = 0; d < dim; ++d) {
-
146 output[d] += curoutput[d];
-
147 }
-
148 }
-
149 }
-
150}
+
114 SUBPAR_VECTORIZABLE
+
115 for (Index_ j = 0; j < range.number; ++j) {
+
116 auto idx = range.index[j];
+
117 curoutput[idx] += condition(range.value[j]);
+
118 ++(nonzeros[idx]);
+
119 }
+
120 }
+
121
+
122 if (count_zero) {
+
123 SUBPAR_VECTORIZABLE
+
124 for (int d = 0; d < dim; ++d) {
+
125 curoutput[d] += len - nonzeros[d];
+
126 }
+
127 }
+
128 }, otherdim, num_threads);
+
129
+
130 } else {
+
131 tatami::parallelize([&](int thread, Index_ start, Index_ len) -> void {
+
132 std::vector<Value_> xbuffer(dim);
+
133 auto ext = tatami::consecutive_extractor<false>(p, !row, start, len);
+
134 auto curoutput = threaded_output_ptrs[thread];
+
135
+
136 for (Index_ x = 0; x < len; ++x) {
+
137 auto ptr = ext->fetch(xbuffer.data());
+
138 SUBPAR_VECTORIZABLE
+
139 for (Index_ j = 0; j < dim; ++j) {
+
140 curoutput[j] += condition(ptr[j]);
+
141 }
+
142 }
+
143 }, otherdim, num_threads);
+
144 }
+
145
+
146 for (int t = 1; t < num_threads; ++t) {
+
147 auto curoutput = threaded_output_ptrs[t];
+
148 SUBPAR_VECTORIZABLE
+
149 for (Index_ d = 0; d < dim; ++d) {
+
150 output[d] += curoutput[d];
+
151 }
+
152 }
+
153 }
+
154}
-
151
-
156namespace nan {
-
157
-
-
161struct Options {
-
166 int num_threads = 1;
-
167};
+
155
+
160namespace nan {
+
161
+
+
165struct Options {
+
170 int num_threads = 1;
+
171};
-
168
-
181template<typename Value_, typename Index_, typename Output_>
-
-
182void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* output, const Options& nopt) {
-
183 counts::apply(row, p, output, nopt.num_threads, [](Value_ x) -> bool { return std::isnan(x); });
-
184}
+
172
+
185template<typename Value_, typename Index_, typename Output_>
+
+
186void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* output, const Options& nopt) {
+
187 counts::apply(row, p, output, nopt.num_threads, [](Value_ x) -> bool { return std::isnan(x); });
+
188}
-
185
-
198template<typename Output_ = int, typename Value_, typename Index_>
-
-
199std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p, const Options& nopt) {
-
200 std::vector<Output_> output(p->nrow());
-
201 apply(true, p, output.data(), nopt);
-
202 return output;
-
203}
+
189
+
202template<typename Output_ = int, typename Value_, typename Index_>
+
+
203std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p, const Options& nopt) {
+
204 std::vector<Output_> output(p->nrow());
+
205 apply(true, p, output.data(), nopt);
+
206 return output;
+
207}
-
204
-
215template<typename Output_ = int, typename Value_, typename Index_>
-
-
216std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p) {
-
217 return by_row(p, Options());
-
218}
+
208
+
219template<typename Output_ = int, typename Value_, typename Index_>
+
+
220std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p) {
+
221 return by_row(p, Options());
+
222}
-
219
-
233template<typename Output_ = int, typename Value_, typename Index_>
-
-
234std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p, const Options& nopt) {
-
235 std::vector<Output_> output(p->ncol());
-
236 apply(false, p, output.data(), nopt);
-
237 return output;
-
238}
+
223
+
237template<typename Output_ = int, typename Value_, typename Index_>
+
+
238std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p, const Options& nopt) {
+
239 std::vector<Output_> output(p->ncol());
+
240 apply(false, p, output.data(), nopt);
+
241 return output;
+
242}
-
239
-
252template<typename Output_ = int, typename Value_, typename Index_>
-
-
253std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p) {
-
254 return by_column(p, Options());
-
255}
+
243
+
256template<typename Output_ = int, typename Value_, typename Index_>
+
+
257std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p) {
+
258 return by_column(p, Options());
+
259}
-
256
-
257}
-
258
-
263namespace zero {
-
264
-
-
268struct Options {
-
273 int num_threads = 1;
-
274};
+
260
+
261}
+
262
+
267namespace zero {
+
268
+
+
272struct Options {
+
277 int num_threads = 1;
+
278};
-
275
-
288template<typename Value_, typename Index_, typename Output_>
-
-
289void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* output, const Options& zopt) {
-
290 counts::apply(row, p, output, zopt.num_threads, [](Value_ x) -> bool { return x == 0; });
-
291}
+
279
+
292template<typename Value_, typename Index_, typename Output_>
+
+
293void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* output, const Options& zopt) {
+
294 counts::apply(row, p, output, zopt.num_threads, [](Value_ x) -> bool { return x == 0; });
+
295}
-
292
-
304template<typename Output_ = int, typename Value_, typename Index_>
-
-
305std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p, const Options& zopt) {
-
306 std::vector<Output_> output(p->nrow());
-
307 apply(true, p, output.data(), zopt);
-
308 return output;
-
309}
+
296
+
308template<typename Output_ = int, typename Value_, typename Index_>
+
+
309std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p, const Options& zopt) {
+
310 std::vector<Output_> output(p->nrow());
+
311 apply(true, p, output.data(), zopt);
+
312 return output;
+
313}
-
310
-
323template<typename Output_ = int, typename Value_, typename Index_>
-
-
324std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p) {
-
325 return by_row(p, Options());
-
326}
+
314
+
327template<typename Output_ = int, typename Value_, typename Index_>
+
+
328std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p) {
+
329 return by_row(p, Options());
+
330}
-
327
-
341template<typename Output_ = int, typename Value_, typename Index_>
-
-
342std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p, const Options& zopt) {
-
343 std::vector<Output_> output(p->ncol());
-
344 apply(false, p, output.data(), zopt);
-
345 return output;
-
346}
+
331
+
345template<typename Output_ = int, typename Value_, typename Index_>
+
+
346std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p, const Options& zopt) {
+
347 std::vector<Output_> output(p->ncol());
+
348 apply(false, p, output.data(), zopt);
+
349 return output;
+
350}
-
347
-
358template<typename Output_ = int, typename Value_, typename Index_>
-
-
359std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p) {
-
360 return by_column(p, Options());
-
361}
-
-
362
-
363}
-
364
+
351
+
362template<typename Output_ = int, typename Value_, typename Index_>
+
+
363std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p) {
+
364 return by_column(p, Options());
365}
+
366
367}
-
368
-
369#endif
+
369}
+
370
+
371}
+
+
372
+
373#endif
virtual Index_ ncol() const=0
virtual Index_ nrow() const=0
virtual bool prefer_rows() const=0
virtual std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(bool row, const Options &opt) const=0
-
std::vector< Output_ > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &nopt)
Definition counts.hpp:199
-
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &nopt)
Definition counts.hpp:182
-
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &nopt)
Definition counts.hpp:234
-
std::vector< Output_ > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &zopt)
Definition counts.hpp:305
-
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &zopt)
Definition counts.hpp:342
-
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &zopt)
Definition counts.hpp:289
+
std::vector< Output_ > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &nopt)
Definition counts.hpp:203
+
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &nopt)
Definition counts.hpp:186
+
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &nopt)
Definition counts.hpp:238
+
std::vector< Output_ > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &zopt)
Definition counts.hpp:309
+
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &zopt)
Definition counts.hpp:346
+
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &zopt)
Definition counts.hpp:293
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, int num_threads, Condition_ condition)
Definition counts.hpp:44
Functions to compute statistics from a tatami::Matrix.
Definition counts.hpp:18
void parallelize(Function_ fun, Index_ tasks, int threads)
auto consecutive_extractor(const Matrix< Value_, Index_ > *mat, bool row, Index_ iter_start, Index_ iter_length, Args_ &&... args)
bool sparse_ordered_index
-
NaN-counting options.
Definition counts.hpp:161
-
int num_threads
Definition counts.hpp:166
-
Zero-counting options.
Definition counts.hpp:268
-
int num_threads
Definition counts.hpp:273
+
NaN-counting options.
Definition counts.hpp:165
+
int num_threads
Definition counts.hpp:170
+
Zero-counting options.
Definition counts.hpp:272
+
int num_threads
Definition counts.hpp:277
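For reference, a minimal usage sketch of the counting helpers documented in the counts.hpp listing above. The by_row()/by_column() overloads, the Options fields and the generic counts::apply() signature are taken from the tooltips on this page; the full namespace qualifications (assumed here to be tatami_stats::counts::nan and tatami_stats::counts::zero), the include path and the double/int types are assumptions not visible in this diff.

    #include <vector>
    #include "tatami_stats/counts.hpp" // include path assumed; only counts.hpp itself appears in this diff.

    // Hypothetical helper: 'mat' is any tatami::Matrix with double values and int indices.
    void count_examples(const tatami::Matrix<double, int>* mat) {
        // Count NaNs per row, using four threads.
        tatami_stats::counts::nan::Options nopt;
        nopt.num_threads = 4;
        std::vector<int> nan_per_row = tatami_stats::counts::nan::by_row(mat, nopt);

        // Count zeros per column with the default single-threaded options.
        std::vector<int> zeros_per_column = tatami_stats::counts::zero::by_column(mat);

        // The generic counts::apply() takes an arbitrary condition, e.g., counting negative entries per row.
        std::vector<int> negatives(mat->nrow());
        tatami_stats::counts::apply(
            /* row = */ true, mat, negatives.data(), /* num_threads = */ 1,
            [](double x) -> bool { return x < 0; });
    }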
diff --git a/docs/index.html b/docs/index.html
index 03bbb83..cc6c38e 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -102,7 +102,7 @@

Quick start

auto col_mean_and_var = tatami_stats::variances::by_column(mat.get(), vopt);
std::vector< Output_ > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &mopt)
Definition medians.hpp:315
-
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &vopt)
Definition variances.hpp:480
+
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &vopt)
Definition variances.hpp:488
Variance calculation options.
Definition variances.hpp:30
int num_threads
Definition variances.hpp:41
bool skip_nan
Definition variances.hpp:35
@@ -113,7 +113,7 @@

Lower-level functionality

std::vector<double> my_output(mat->nrow());
tatami_stats::sums::apply(/* row = */ true, mat.get(), output.data(), sopt);
-
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &sopt)
Definition sums.hpp:207
+
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &sopt)
Definition sums.hpp:211
Summation options.
Definition sums.hpp:28

Some of the algorithms expose low-level functions for even more fine-grained control. For example, we can manage the loop over the matrix rows ourselves, computing the mean and median for each row:

auto ext = mat->dense_row();
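The row-by-row loop described here can be sketched as below. This is an assumed illustration rather than the page's own (truncated) example: it reuses tatami::consecutive_extractor() and the low-level tatami_stats::sums::direct() helper that both appear later in this patch, computes only the mean (the page's example also computes a median), and assumes double values, int indices and the include path.

    #include <vector>
    #include "tatami_stats/sums.hpp" // include path assumed.

    std::vector<double> manual_row_means(const tatami::Matrix<double, int>* mat) {
        int nrow = mat->nrow(), ncol = mat->ncol();
        std::vector<double> buffer(ncol), means(nrow);

        // Walk through all rows consecutively; fetch() fills the buffer and returns a pointer to the current row's values.
        auto ext = tatami::consecutive_extractor<false>(mat, /* row = */ true, 0, nrow);
        for (int r = 0; r < nrow; ++r) {
            auto ptr = ext->fetch(buffer.data());
            means[r] = tatami_stats::sums::direct(ptr, ncol, /* skip_nan = */ false) / ncol;
        }
        return means;
    }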
diff --git a/docs/ranges_8hpp_source.html b/docs/ranges_8hpp_source.html
index ee51479..eac026d 100644
--- a/docs/ranges_8hpp_source.html
+++ b/docs/ranges_8hpp_source.html
@@ -203,266 +203,271 @@
190 ::tatami_stats::internal::nanable_ifelse<Value_>(
191 my_skip_nan,
192 [&]() {
-
193 for (Index_ i = 0; i < my_num; ++i, ++ptr) {
-
194 auto val = *ptr;
-
195 if (std::isnan(val)) {
-
196 my_store[i] = internal::choose_placeholder<minimum_, Value_>();
-
197 } else {
-
198 my_store[i] = val;
-
199 }
-
200 }
-
201 },
-
202 [&]() {
-
203 std::copy_n(ptr, my_num, my_store);
-
204 }
-
205 );
-
206
-
207 } else {
-
208 for (Index_ i = 0; i < my_num; ++i, ++ptr) {
-
209 auto val = *ptr;
-
210 if (internal::is_better<minimum_>(my_store[i], val)) { // this should implicitly skip NaNs, any NaN comparison will be false.
-
211 my_store[i] = val;
-
212 }
-
213 }
-
214 }
-
215 }
+
193 SUBPAR_VECTORIZABLE
+
194 for (Index_ i = 0; i < my_num; ++i, ++ptr) {
+
195 auto val = *ptr;
+
196 if (std::isnan(val)) {
+
197 my_store[i] = internal::choose_placeholder<minimum_, Value_>();
+
198 } else {
+
199 my_store[i] = val;
+
200 }
+
201 }
+
202 },
+
203 [&]() {
+
204 std::copy_n(ptr, my_num, my_store);
+
205 }
+
206 );
+
207
+
208 } else {
+
209 SUBPAR_VECTORIZABLE
+
210 for (Index_ i = 0; i < my_num; ++i, ++ptr) {
+
211 auto val = *ptr;
+
212 if (internal::is_better<minimum_>(my_store[i], val)) { // this should implicitly skip NaNs, any NaN comparison will be false.
+
213 my_store[i] = val;
+
214 }
+
215 }
+
216 }
+
217 }
-
216
-
-
220 void finish() {
-
221 if (my_init) {
-
222 std::fill_n(my_store, my_num, internal::choose_placeholder<minimum_, Value_>());
-
223 }
-
224 }
+
218
+
+
222 void finish() {
+
223 if (my_init) {
+
224 std::fill_n(my_store, my_num, internal::choose_placeholder<minimum_, Value_>());
+
225 }
+
226 }
-
225
-
226private:
-
227 bool my_init = true;
-
228 Index_ my_num;
-
229 Output_* my_store;
-
230 bool my_skip_nan;
-
231};
+
227
+
228private:
+
229 bool my_init = true;
+
230 Index_ my_num;
+
231 Output_* my_store;
+
232 bool my_skip_nan;
+
233};
-
232
-
245template<bool minimum_, typename Output_, typename Value_, typename Index_>
-
- -
247public:
-
-
257 RunningSparse(Index_ num, Output_* store, bool skip_nan, Index_ subtract = 0) :
-
258 my_num(num), my_store(store), my_skip_nan(skip_nan), my_subtract(subtract) {}
+
234
+
247template<bool minimum_, typename Output_, typename Value_, typename Index_>
+
+ +
249public:
+
+
259 RunningSparse(Index_ num, Output_* store, bool skip_nan, Index_ subtract = 0) :
+
260 my_num(num), my_store(store), my_skip_nan(skip_nan), my_subtract(subtract) {}
-
259
-
-
266 void add(const Value_* value, const Index_* index, Index_ number) {
-
267 if (my_count == 0) {
-
268 my_nonzero.resize(my_num);
-
269 std::fill_n(my_store, my_num, internal::choose_placeholder<minimum_, Value_>());
-
270
-
271 if (!my_skip_nan) {
-
272 for (Index_ i = 0; i < number; ++i, ++value, ++index) {
-
273 auto val = *value;
-
274 auto idx = *index - my_subtract;
-
275 my_store[idx] = val;
-
276 ++my_nonzero[idx];
-
277 }
-
278 my_count = 1;
-
279 return;
-
280 }
-
281 }
-
282
-
283 for (Index_ i = 0; i < number; ++i, ++value, ++index) {
-
284 auto val = *value;
-
285 auto idx = *index - my_subtract;
-
286 auto& current = my_store[idx];
-
287 if (internal::is_better<minimum_>(current, val)) { // this should implicitly skip NaNs, any NaN comparison will be false.
-
288 current = val;
-
289 }
-
290 ++my_nonzero[idx];
-
291 }
-
292
-
293 ++my_count;
-
294 }
+
261
+
+
268 void add(const Value_* value, const Index_* index, Index_ number) {
+
269 if (my_count == 0) {
+
270 my_nonzero.resize(my_num);
+
271 std::fill_n(my_store, my_num, internal::choose_placeholder<minimum_, Value_>());
+
272
+
273 if (!my_skip_nan) {
+
274 SUBPAR_VECTORIZABLE
+
275 for (Index_ i = 0; i < number; ++i, ++value, ++index) {
+
276 auto val = *value;
+
277 auto idx = *index - my_subtract;
+
278 my_store[idx] = val;
+
279 ++my_nonzero[idx];
+
280 }
+
281 my_count = 1;
+
282 return;
+
283 }
+
284 }
+
285
+
286 SUBPAR_VECTORIZABLE
+
287 for (Index_ i = 0; i < number; ++i, ++value, ++index) {
+
288 auto val = *value;
+
289 auto idx = *index - my_subtract;
+
290 auto& current = my_store[idx];
+
291 if (internal::is_better<minimum_>(current, val)) { // this should implicitly skip NaNs, any NaN comparison will be false.
+
292 current = val;
+
293 }
+
294 ++my_nonzero[idx];
+
295 }
+
296
+
297 ++my_count;
+
298 }
-
295
-
-
299 void finish() {
-
300 if (my_count) {
-
301 for (Index_ i = 0; i < my_num; ++i) {
-
302 if (my_count > my_nonzero[i]) {
-
303 auto& current = my_store[i];
-
304 if (internal::is_better<minimum_>(current, 0)) {
-
305 current = 0;
-
306 }
-
307 }
-
308 }
-
309 } else {
-
310 std::fill_n(my_store, my_num, internal::choose_placeholder<minimum_, Value_>());
-
311 }
-
312 }
+
299
+
+
303 void finish() {
+
304 if (my_count) {
+
305 SUBPAR_VECTORIZABLE
+
306 for (Index_ i = 0; i < my_num; ++i) {
+
307 if (my_count > my_nonzero[i]) {
+
308 auto& current = my_store[i];
+
309 if (internal::is_better<minimum_>(current, 0)) {
+
310 current = 0;
+
311 }
+
312 }
+
313 }
+
314 } else {
+
315 std::fill_n(my_store, my_num, internal::choose_placeholder<minimum_, Value_>());
+
316 }
+
317 }
-
313
-
314private:
-
315 Index_ my_num;
-
316 Output_* my_store;
-
317 bool my_skip_nan;
-
318 Index_ my_subtract;
-
319 Index_ my_count = 0;
-
320 std::vector<Index_> my_nonzero;
-
321};
+
318
+
319private:
+
320 Index_ my_num;
+
321 Output_* my_store;
+
322 bool my_skip_nan;
+
323 Index_ my_subtract;
+
324 Index_ my_count = 0;
+
325 std::vector<Index_> my_nonzero;
+
326};
-
322
-
341template<typename Value_, typename Index_, typename Output_>
-
-
342void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* min_out, Output_* max_out, const Options& ropt) {
-
343 auto dim = (row ? p->nrow() : p->ncol());
-
344 auto otherdim = (row ? p->ncol() : p->nrow());
-
345 const bool direct = p->prefer_rows() == row;
-
346
-
347 bool store_min = min_out != NULL;
-
348 bool store_max = max_out != NULL;
-
349
-
350 if (p->sparse()) {
-
351 tatami::Options opt;
-
352 opt.sparse_ordered_index = false;
-
353
-
354 if (direct) {
-
355 opt.sparse_extract_index = false;
-
356 tatami::parallelize([&](int, Index_ s, Index_ l) {
-
357 auto ext = tatami::consecutive_extractor<true>(p, row, s, l, opt);
-
358 std::vector<Value_> vbuffer(otherdim);
-
359 for (Index_ x = 0; x < l; ++x) {
-
360 auto out = ext->fetch(vbuffer.data(), NULL);
-
361 if (store_min) {
-
362 min_out[x + s] = ranges::direct<true>(out.value, out.number, otherdim, ropt.skip_nan);
-
363 }
-
364 if (store_max) {
-
365 max_out[x + s] = ranges::direct<false>(out.value, out.number, otherdim, ropt.skip_nan);
-
366 }
-
367 }
-
368 }, dim, ropt.num_threads);
-
369
-
370 } else {
-
371 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
-
372 auto ext = tatami::consecutive_extractor<true>(p, !row, static_cast<Index_>(0), otherdim, s, l, opt);
-
373 std::vector<Value_> vbuffer(l);
-
374 std::vector<Index_> ibuffer(l);
-
375
-
376 auto local_min = (store_min ? LocalOutputBuffer<Output_>(thread, s, l, min_out) : LocalOutputBuffer<Output_>());
-
377 auto local_max = (store_max ? LocalOutputBuffer<Output_>(thread, s, l, max_out) : LocalOutputBuffer<Output_>());
-
378 ranges::RunningSparse<true, Output_, Value_, Index_> runmin(l, local_min.data(), ropt.skip_nan, s);
-
379 ranges::RunningSparse<false, Output_, Value_, Index_> runmax(l, local_max.data(), ropt.skip_nan, s);
+
327
+
346template<typename Value_, typename Index_, typename Output_>
+
+
347void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* min_out, Output_* max_out, const Options& ropt) {
+
348 auto dim = (row ? p->nrow() : p->ncol());
+
349 auto otherdim = (row ? p->ncol() : p->nrow());
+
350 const bool direct = p->prefer_rows() == row;
+
351
+
352 bool store_min = min_out != NULL;
+
353 bool store_max = max_out != NULL;
+
354
+
355 if (p->sparse()) {
+
356 tatami::Options opt;
+
357 opt.sparse_ordered_index = false;
+
358
+
359 if (direct) {
+
360 opt.sparse_extract_index = false;
+
361 tatami::parallelize([&](int, Index_ s, Index_ l) {
+
362 auto ext = tatami::consecutive_extractor<true>(p, row, s, l, opt);
+
363 std::vector<Value_> vbuffer(otherdim);
+
364 for (Index_ x = 0; x < l; ++x) {
+
365 auto out = ext->fetch(vbuffer.data(), NULL);
+
366 if (store_min) {
+
367 min_out[x + s] = ranges::direct<true>(out.value, out.number, otherdim, ropt.skip_nan);
+
368 }
+
369 if (store_max) {
+
370 max_out[x + s] = ranges::direct<false>(out.value, out.number, otherdim, ropt.skip_nan);
+
371 }
+
372 }
+
373 }, dim, ropt.num_threads);
+
374
+
375 } else {
+
376 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
+
377 auto ext = tatami::consecutive_extractor<true>(p, !row, static_cast<Index_>(0), otherdim, s, l, opt);
+
378 std::vector<Value_> vbuffer(l);
+
379 std::vector<Index_> ibuffer(l);
380
-
381 for (Index_ x = 0; x < otherdim; ++x) {
-
382 auto out = ext->fetch(vbuffer.data(), ibuffer.data());
-
383 if (store_min) {
-
384 runmin.add(out.value, out.index, out.number);
-
385 }
-
386 if (store_max) {
-
387 runmax.add(out.value, out.index, out.number);
-
388 }
-
389 }
-
390
-
391 if (store_min) {
-
392 runmin.finish();
-
393 local_min.transfer();
+
381 auto local_min = (store_min ? LocalOutputBuffer<Output_>(thread, s, l, min_out) : LocalOutputBuffer<Output_>());
+
382 auto local_max = (store_max ? LocalOutputBuffer<Output_>(thread, s, l, max_out) : LocalOutputBuffer<Output_>());
+
383 ranges::RunningSparse<true, Output_, Value_, Index_> runmin(l, local_min.data(), ropt.skip_nan, s);
+
384 ranges::RunningSparse<false, Output_, Value_, Index_> runmax(l, local_max.data(), ropt.skip_nan, s);
+
385
+
386 for (Index_ x = 0; x < otherdim; ++x) {
+
387 auto out = ext->fetch(vbuffer.data(), ibuffer.data());
+
388 if (store_min) {
+
389 runmin.add(out.value, out.index, out.number);
+
390 }
+
391 if (store_max) {
+
392 runmax.add(out.value, out.index, out.number);
+
393 }
394 }
-
395 if (store_max) {
-
396 runmax.finish();
-
397 local_max.transfer();
-
398 }
-
399 }, dim, ropt.num_threads);
-
400 }
-
401
-
402 } else {
-
403 if (direct) {
-
404 tatami::parallelize([&](int, Index_ s, Index_ l) {
-
405 auto ext = tatami::consecutive_extractor<false>(p, row, s, l);
-
406 std::vector<Value_> buffer(otherdim);
-
407 for (Index_ x = 0; x < l; ++x) {
-
408 auto ptr = ext->fetch(buffer.data());
-
409 if (store_min) {
-
410 min_out[x + s] = ranges::direct<true>(ptr, otherdim, ropt.skip_nan);
-
411 }
-
412 if (store_max) {
-
413 max_out[x + s] = ranges::direct<false>(ptr, otherdim, ropt.skip_nan);
-
414 }
-
415 }
-
416 }, dim, ropt.num_threads);
-
417
-
418 } else {
-
419 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
-
420 auto ext = tatami::consecutive_extractor<false>(p, !row, static_cast<Index_>(0), otherdim, s, l);
-
421 std::vector<Value_> buffer(l);
+
395
+
396 if (store_min) {
+
397 runmin.finish();
+
398 local_min.transfer();
+
399 }
+
400 if (store_max) {
+
401 runmax.finish();
+
402 local_max.transfer();
+
403 }
+
404 }, dim, ropt.num_threads);
+
405 }
+
406
+
407 } else {
+
408 if (direct) {
+
409 tatami::parallelize([&](int, Index_ s, Index_ l) {
+
410 auto ext = tatami::consecutive_extractor<false>(p, row, s, l);
+
411 std::vector<Value_> buffer(otherdim);
+
412 for (Index_ x = 0; x < l; ++x) {
+
413 auto ptr = ext->fetch(buffer.data());
+
414 if (store_min) {
+
415 min_out[x + s] = ranges::direct<true>(ptr, otherdim, ropt.skip_nan);
+
416 }
+
417 if (store_max) {
+
418 max_out[x + s] = ranges::direct<false>(ptr, otherdim, ropt.skip_nan);
+
419 }
+
420 }
+
421 }, dim, ropt.num_threads);
422
-
423 auto local_min = (store_min ? LocalOutputBuffer<Output_>(thread, s, l, min_out) : LocalOutputBuffer<Output_>());
-
424 auto local_max = (store_max ? LocalOutputBuffer<Output_>(thread, s, l, max_out) : LocalOutputBuffer<Output_>());
-
425 ranges::RunningDense<true, Output_, Value_, Index_> runmin(l, local_min.data(), ropt.skip_nan);
-
426 ranges::RunningDense<false, Output_, Value_, Index_> runmax(l, local_max.data(), ropt.skip_nan);
+
423 } else {
+
424 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
+
425 auto ext = tatami::consecutive_extractor<false>(p, !row, static_cast<Index_>(0), otherdim, s, l);
+
426 std::vector<Value_> buffer(l);
427
-
428 for (Index_ x = 0; x < otherdim; ++x) {
-
429 auto ptr = ext->fetch(buffer.data());
-
430 if (store_min) {
-
431 runmin.add(ptr);
-
432 }
-
433 if (store_max) {
-
434 runmax.add(ptr);
-
435 }
-
436 }
-
437
-
438 if (store_min) {
-
439 runmin.finish();
-
440 local_min.transfer();
+
428 auto local_min = (store_min ? LocalOutputBuffer<Output_>(thread, s, l, min_out) : LocalOutputBuffer<Output_>());
+
429 auto local_max = (store_max ? LocalOutputBuffer<Output_>(thread, s, l, max_out) : LocalOutputBuffer<Output_>());
+
430 ranges::RunningDense<true, Output_, Value_, Index_> runmin(l, local_min.data(), ropt.skip_nan);
+
431 ranges::RunningDense<false, Output_, Value_, Index_> runmax(l, local_max.data(), ropt.skip_nan);
+
432
+
433 for (Index_ x = 0; x < otherdim; ++x) {
+
434 auto ptr = ext->fetch(buffer.data());
+
435 if (store_min) {
+
436 runmin.add(ptr);
+
437 }
+
438 if (store_max) {
+
439 runmax.add(ptr);
+
440 }
441 }
-
442 if (store_max) {
-
443 runmax.finish();
-
444 local_max.transfer();
-
445 }
-
446 }, dim, ropt.num_threads);
-
447 }
-
448 }
-
449
-
450 return;
-
451}
+
442
+
443 if (store_min) {
+
444 runmin.finish();
+
445 local_min.transfer();
+
446 }
+
447 if (store_max) {
+
448 runmax.finish();
+
449 local_max.transfer();
+
450 }
+
451 }, dim, ropt.num_threads);
+
452 }
+
453 }
+
454
+
455 return;
+
456}
-
452
-
466template<typename Output_ = double, typename Value_, typename Index_>
-
-
467std::pair<std::vector<Output_>, std::vector<Output_> > by_column(const tatami::Matrix<Value_, Index_>* p, const Options& ropt) {
-
468 std::vector<Output_> mins(p->ncol()), maxs(p->ncol());
-
469 apply(false, p, mins.data(), maxs.data(), ropt);
-
470 return std::make_pair(std::move(mins), std::move(maxs));
-
471}
+
457
+
471template<typename Output_ = double, typename Value_, typename Index_>
+
+
472std::pair<std::vector<Output_>, std::vector<Output_> > by_column(const tatami::Matrix<Value_, Index_>* p, const Options& ropt) {
+
473 std::vector<Output_> mins(p->ncol()), maxs(p->ncol());
+
474 apply(false, p, mins.data(), maxs.data(), ropt);
+
475 return std::make_pair(std::move(mins), std::move(maxs));
+
476}
-
472
-
485template<typename Output_ = double, typename Value_, typename Index_>
-
-
486std::pair<std::vector<Output_>, std::vector<Output_> > by_column(const tatami::Matrix<Value_, Index_>* p) {
-
487 return by_column<Output_>(p, Options());
-
488}
+
477
+
490template<typename Output_ = double, typename Value_, typename Index_>
+
+
491std::pair<std::vector<Output_>, std::vector<Output_> > by_column(const tatami::Matrix<Value_, Index_>* p) {
+
492 return by_column<Output_>(p, Options());
+
493}
-
489
-
503template<typename Output_ = double, typename Value_, typename Index_>
-
-
504std::pair<std::vector<Output_>, std::vector<Output_> > by_row(const tatami::Matrix<Value_, Index_>* p, const Options& ropt) {
-
505 std::vector<Output_> mins(p->nrow()), maxs(p->nrow());
-
506 apply(true, p, mins.data(), maxs.data(), ropt);
-
507 return std::make_pair(std::move(mins), std::move(maxs));
-
508}
+
494
+
508template<typename Output_ = double, typename Value_, typename Index_>
+
+
509std::pair<std::vector<Output_>, std::vector<Output_> > by_row(const tatami::Matrix<Value_, Index_>* p, const Options& ropt) {
+
510 std::vector<Output_> mins(p->nrow()), maxs(p->nrow());
+
511 apply(true, p, mins.data(), maxs.data(), ropt);
+
512 return std::make_pair(std::move(mins), std::move(maxs));
+
513}
-
509
-
522template<typename Output_ = double, typename Value_, typename Index_>
-
-
523std::pair<std::vector<Output_>, std::vector<Output_> > by_row(const tatami::Matrix<Value_, Index_>* p) {
-
524 return by_row<Output_>(p, Options());
-
525}
+
514
+
527template<typename Output_ = double, typename Value_, typename Index_>
+
+
528std::pair<std::vector<Output_>, std::vector<Output_> > by_row(const tatami::Matrix<Value_, Index_>* p) {
+
529 return by_row<Output_>(p, Options());
+
530}
-
526
-
527}
-
528
-
529}
-
530
-
531#endif
+
531
+
532}
+
533
+
534}
+
535
+
536#endif
virtual Index_ ncol() const=0
virtual Index_ nrow() const=0
@@ -470,17 +475,17 @@
virtual std::unique_ptr< MyopicSparseExtractor< Value_, Index_ > > sparse(bool row, const Options &opt) const=0
Local output buffer for running calculations.
Definition utils.hpp:72
Running minima/maxima from dense data.
Definition ranges.hpp:173
-
void finish()
Definition ranges.hpp:220
+
void finish()
Definition ranges.hpp:222
RunningDense(Index_ num, Output_ *store, bool skip_nan)
Definition ranges.hpp:181
void add(const Value_ *ptr)
Definition ranges.hpp:187
-
Running minima/maxima from sparse data.
Definition ranges.hpp:246
-
void finish()
Definition ranges.hpp:299
-
RunningSparse(Index_ num, Output_ *store, bool skip_nan, Index_ subtract=0)
Definition ranges.hpp:257
-
void add(const Value_ *value, const Index_ *index, Index_ number)
Definition ranges.hpp:266
-
std::pair< std::vector< Output_ >, std::vector< Output_ > > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &ropt)
Definition ranges.hpp:504
+
Running minima/maxima from sparse data.
Definition ranges.hpp:248
+
void finish()
Definition ranges.hpp:303
+
RunningSparse(Index_ num, Output_ *store, bool skip_nan, Index_ subtract=0)
Definition ranges.hpp:259
+
void add(const Value_ *value, const Index_ *index, Index_ number)
Definition ranges.hpp:268
+
std::pair< std::vector< Output_ >, std::vector< Output_ > > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &ropt)
Definition ranges.hpp:509
Value_ direct(const Value_ *ptr, Index_ num, bool skip_nan)
Definition ranges.hpp:97
-
std::pair< std::vector< Output_ >, std::vector< Output_ > > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &ropt)
Definition ranges.hpp:467
-
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *min_out, Output_ *max_out, const Options &ropt)
Definition ranges.hpp:342
+
std::pair< std::vector< Output_ >, std::vector< Output_ > > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &ropt)
Definition ranges.hpp:472
+
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *min_out, Output_ *max_out, const Options &ropt)
Definition ranges.hpp:347
Functions to compute statistics from a tatami::Matrix.
Definition counts.hpp:18
void parallelize(Function_ fun, Index_ tasks, int threads)
auto consecutive_extractor(const Matrix< Value_, Index_ > *mat, bool row, Index_ iter_start, Index_ iter_length, Args_ &&... args)
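For reference, a hedged usage sketch of the range helpers in this file. The by_row()/by_column()/apply() signatures and the skip_nan/num_threads options follow the ranges.hpp listing above; the tatami_stats::ranges qualification, the include path and the double/int types are assumptions.

    #include <vector>
    #include "tatami_stats/ranges.hpp" // include path assumed.

    void range_examples(const tatami::Matrix<double, int>* mat) {
        tatami_stats::ranges::Options ropt;
        ropt.skip_nan = true;   // ignore NaNs when searching for the extremes.
        ropt.num_threads = 4;

        // by_row() returns a pair: .first holds per-row minima, .second holds per-row maxima.
        auto row_extremes = tatami_stats::ranges::by_row(mat, ropt);

        // apply() accepts NULL for an unwanted output, e.g., to compute column maxima only.
        std::vector<double> col_maxs(mat->ncol());
        tatami_stats::ranges::apply(/* row = */ false, mat, static_cast<double*>(nullptr), col_maxs.data(), ropt);
    }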
diff --git a/docs/sums_8hpp_source.html b/docs/sums_8hpp_source.html
index 536a836..45befb8 100644
--- a/docs/sums_8hpp_source.html
+++ b/docs/sums_8hpp_source.html
@@ -145,178 +145,182 @@
108 internal::nanable_ifelse<Value_>(
109 my_skip_nan,
110 [&]() {
-
111 for (Index_ i = 0; i < my_num; ++i) {
-
112 auto val = ptr[i];
-
113 if (!std::isnan(val)) {
-
114 my_sum[i] += val;
-
115 }
-
116 }
-
117 },
-
118 [&]() {
-
119 for (Index_ i = 0; i < my_num; ++i) {
-
120 my_sum[i] += ptr[i];
-
121 }
-
122 }
-
123 );
-
124 }
+
111 SUBPAR_VECTORIZABLE
+
112 for (Index_ i = 0; i < my_num; ++i) {
+
113 auto val = ptr[i];
+
114 if (!std::isnan(val)) {
+
115 my_sum[i] += val;
+
116 }
+
117 }
+
118 },
+
119 [&]() {
+
120 SUBPAR_VECTORIZABLE
+
121 for (Index_ i = 0; i < my_num; ++i) {
+
122 my_sum[i] += ptr[i];
+
123 }
+
124 }
+
125 );
+
126 }
-
125
-
126private:
-
127 Index_ my_num;
-
128 Output_* my_sum;
-
129 bool my_skip_nan;
-
130};
+
127
+
128private:
+
129 Index_ my_num;
+
130 Output_* my_sum;
+
131 bool my_skip_nan;
+
132};
-
131
-
142template<typename Output_, typename Value_, typename Index_>
-
- -
144public:
-
-
153 RunningSparse(Output_* sum, bool skip_nan, Index_ subtract = 0) :
-
154 my_sum(sum), my_skip_nan(skip_nan), my_subtract(subtract) {}
+
133
+
144template<typename Output_, typename Value_, typename Index_>
+
+ +
146public:
+
+
155 RunningSparse(Output_* sum, bool skip_nan, Index_ subtract = 0) :
+
156 my_sum(sum), my_skip_nan(skip_nan), my_subtract(subtract) {}
-
155
-
-
163 void add(const Value_* value, const Index_* index, Index_ number) {
-
164 internal::nanable_ifelse<Value_>(
-
165 my_skip_nan,
-
166 [&]() {
-
167 for (Index_ i = 0; i < number; ++i) {
-
168 auto val = value[i];
-
169 if (!std::isnan(val)) {
-
170 my_sum[index[i] - my_subtract] += val;
-
171 }
-
172 }
-
173 },
-
174 [&]() {
-
175 for (Index_ i = 0; i < number; ++i) {
-
176 my_sum[index[i] - my_subtract] += value[i];
-
177 }
-
178 }
-
179 );
-
180 }
+
157
+
+
165 void add(const Value_* value, const Index_* index, Index_ number) {
+
166 internal::nanable_ifelse<Value_>(
+
167 my_skip_nan,
+
168 [&]() {
+
169 SUBPAR_VECTORIZABLE
+
170 for (Index_ i = 0; i < number; ++i) {
+
171 auto val = value[i];
+
172 if (!std::isnan(val)) {
+
173 my_sum[index[i] - my_subtract] += val;
+
174 }
+
175 }
+
176 },
+
177 [&]() {
+
178 SUBPAR_VECTORIZABLE
+
179 for (Index_ i = 0; i < number; ++i) {
+
180 my_sum[index[i] - my_subtract] += value[i];
+
181 }
+
182 }
+
183 );
+
184 }
-
181
-
182private:
-
183 Output_* my_sum;
-
184 bool my_skip_nan;
-
185 Index_ my_subtract;
-
186};
+
185
+
186private:
+
187 Output_* my_sum;
+
188 bool my_skip_nan;
+
189 Index_ my_subtract;
+
190};
-
187
-
206template<typename Value_, typename Index_, typename Output_>
-
-
207void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* output, const Options& sopt) {
-
208 auto dim = (row ? p->nrow() : p->ncol());
-
209 auto otherdim = (row ? p->ncol() : p->nrow());
-
210 const bool direct = p->prefer_rows() == row;
-
211
-
212 if (p->sparse()) {
-
213 if (direct) {
-
214 tatami::Options opt;
-
215 opt.sparse_extract_index = false;
-
216
-
217 tatami::parallelize([&](int, Index_ s, Index_ l) {
-
218 auto ext = tatami::consecutive_extractor<true>(p, row, s, l, opt);
-
219 std::vector<Value_> vbuffer(otherdim);
-
220 for (Index_ x = 0; x < l; ++x) {
-
221 auto out = ext->fetch(vbuffer.data(), NULL);
-
222 output[x + s] = sums::direct(out.value, out.number, sopt.skip_nan);
-
223 }
-
224 }, dim, sopt.num_threads);
-
225
-
226 } else {
-
227 tatami::Options opt;
-
228 opt.sparse_ordered_index = false;
+
191
+
210template<typename Value_, typename Index_, typename Output_>
+
+
211void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* output, const Options& sopt) {
+
212 auto dim = (row ? p->nrow() : p->ncol());
+
213 auto otherdim = (row ? p->ncol() : p->nrow());
+
214 const bool direct = p->prefer_rows() == row;
+
215
+
216 if (p->sparse()) {
+
217 if (direct) {
+
218 tatami::Options opt;
+
219 opt.sparse_extract_index = false;
+
220
+
221 tatami::parallelize([&](int, Index_ s, Index_ l) {
+
222 auto ext = tatami::consecutive_extractor<true>(p, row, s, l, opt);
+
223 std::vector<Value_> vbuffer(otherdim);
+
224 for (Index_ x = 0; x < l; ++x) {
+
225 auto out = ext->fetch(vbuffer.data(), NULL);
+
226 output[x + s] = sums::direct(out.value, out.number, sopt.skip_nan);
+
227 }
+
228 }, dim, sopt.num_threads);
229
-
230 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
-
231 auto ext = tatami::consecutive_extractor<true>(p, !row, static_cast<Index_>(0), otherdim, s, l, opt);
-
232 std::vector<Value_> vbuffer(l);
-
233 std::vector<Index_> ibuffer(l);
-
234
-
235 LocalOutputBuffer<Output_> local_output(thread, s, l, output);
-
236 sums::RunningSparse<Output_, Value_, Index_> runner(local_output.data(), sopt.skip_nan, s);
-
237
-
238 for (Index_ x = 0; x < otherdim; ++x) {
-
239 auto out = ext->fetch(vbuffer.data(), ibuffer.data());
-
240 runner.add(out.value, out.index, out.number);
-
241 }
-
242
-
243 local_output.transfer();
-
244 }, dim, sopt.num_threads);
-
245 }
+
230 } else {
+
231 tatami::Options opt;
+
232 opt.sparse_ordered_index = false;
+
233
+
234 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
+
235 auto ext = tatami::consecutive_extractor<true>(p, !row, static_cast<Index_>(0), otherdim, s, l, opt);
+
236 std::vector<Value_> vbuffer(l);
+
237 std::vector<Index_> ibuffer(l);
+
238
+
239 LocalOutputBuffer<Output_> local_output(thread, s, l, output);
+
240 sums::RunningSparse<Output_, Value_, Index_> runner(local_output.data(), sopt.skip_nan, s);
+
241
+
242 for (Index_ x = 0; x < otherdim; ++x) {
+
243 auto out = ext->fetch(vbuffer.data(), ibuffer.data());
+
244 runner.add(out.value, out.index, out.number);
+
245 }
246
-
247 } else {
-
248 if (direct) {
-
249 tatami::parallelize([&](int, Index_ s, Index_ l) {
-
250 auto ext = tatami::consecutive_extractor<false>(p, row, s, l);
-
251 std::vector<Value_> buffer(otherdim);
-
252 for (Index_ x = 0; x < l; ++x) {
-
253 auto out = ext->fetch(buffer.data());
-
254 output[x + s] = sums::direct(out, otherdim, sopt.skip_nan);
-
255 }
-
256 }, dim, sopt.num_threads);
-
257
-
258 } else {
-
259 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
-
260 auto ext = tatami::consecutive_extractor<false>(p, !row, static_cast<Index_>(0), otherdim, s, l);
-
261 std::vector<Value_> buffer(l);
-
262
-
263 LocalOutputBuffer<Output_> local_output(thread, s, l, output);
-
264 sums::RunningDense<Output_, Value_, Index_> runner(l, local_output.data(), sopt.skip_nan);
-
265
-
266 for (Index_ x = 0; x < otherdim; ++x) {
-
267 auto out = ext->fetch(buffer.data());
-
268 runner.add(out);
-
269 }
-
270
-
271 local_output.transfer();
-
272 }, dim, sopt.num_threads);
-
273 }
-
274 }
-
275
-
276 return;
-
277}
+
247 local_output.transfer();
+
248 }, dim, sopt.num_threads);
+
249 }
+
250
+
251 } else {
+
252 if (direct) {
+
253 tatami::parallelize([&](int, Index_ s, Index_ l) {
+
254 auto ext = tatami::consecutive_extractor<false>(p, row, s, l);
+
255 std::vector<Value_> buffer(otherdim);
+
256 for (Index_ x = 0; x < l; ++x) {
+
257 auto out = ext->fetch(buffer.data());
+
258 output[x + s] = sums::direct(out, otherdim, sopt.skip_nan);
+
259 }
+
260 }, dim, sopt.num_threads);
+
261
+
262 } else {
+
263 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
+
264 auto ext = tatami::consecutive_extractor<false>(p, !row, static_cast<Index_>(0), otherdim, s, l);
+
265 std::vector<Value_> buffer(l);
+
266
+
267 LocalOutputBuffer<Output_> local_output(thread, s, l, output);
+
268 sums::RunningDense<Output_, Value_, Index_> runner(l, local_output.data(), sopt.skip_nan);
+
269
+
270 for (Index_ x = 0; x < otherdim; ++x) {
+
271 auto out = ext->fetch(buffer.data());
+
272 runner.add(out);
+
273 }
+
274
+
275 local_output.transfer();
+
276 }, dim, sopt.num_threads);
+
277 }
+
278 }
+
279
+
280 return;
+
281}
-
278
-
290template<typename Output_ = double, typename Value_, typename Index_>
-
-
291std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p, const Options& sopt) {
-
292 std::vector<Output_> output(p->ncol());
-
293 apply(false, p, output.data(), sopt);
-
294 return output;
-
295}
+
282
+
294template<typename Output_ = double, typename Value_, typename Index_>
+
+
295std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p, const Options& sopt) {
+
296 std::vector<Output_> output(p->ncol());
+
297 apply(false, p, output.data(), sopt);
+
298 return output;
+
299}
-
296
-
307template<typename Output_ = double, typename Value_, typename Index_>
-
-
308std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p) {
-
309 return by_column(p, Options());
-
310}
+
300
+
311template<typename Output_ = double, typename Value_, typename Index_>
+
+
312std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p) {
+
313 return by_column(p, Options());
+
314}
-
311
-
323template<typename Output_ = double, typename Value_, typename Index_>
-
-
324std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p, const Options& sopt) {
-
325 std::vector<Output_> output(p->nrow());
-
326 apply(true, p, output.data(), sopt);
-
327 return output;
-
328}
+
315
+
327template<typename Output_ = double, typename Value_, typename Index_>
+
+
328std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p, const Options& sopt) {
+
329 std::vector<Output_> output(p->nrow());
+
330 apply(true, p, output.data(), sopt);
+
331 return output;
+
332}
-
329
-
340template<typename Output_ = double, typename Value_, typename Index_>
-
-
341std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p) {
-
342 return by_row(p, Options());
-
343}
-
-
344
-
345}
-
346
+
333
+
344template<typename Output_ = double, typename Value_, typename Index_>
+
+
345std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p) {
+
346 return by_row(p, Options());
347}
+
348
-
349#endif
+
349}
+
350
+
351}
+
352
+
353#endif
virtual Index_ ncol() const=0
virtual Index_ nrow() const=0
@@ -328,12 +332,12 @@
Running sums from dense data.
Definition sums.hpp:93
void add(const Value_ *ptr)
Definition sums.hpp:107
RunningDense(Index_ num, Output_ *sum, bool skip_nan)
Definition sums.hpp:101
-
Running sums from sparse data.
Definition sums.hpp:143
-
void add(const Value_ *value, const Index_ *index, Index_ number)
Definition sums.hpp:163
-
RunningSparse(Output_ *sum, bool skip_nan, Index_ subtract=0)
Definition sums.hpp:153
-
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &sopt)
Definition sums.hpp:291
-
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &sopt)
Definition sums.hpp:207
-
std::vector< Output_ > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &sopt)
Definition sums.hpp:324
+
Running sums from sparse data.
Definition sums.hpp:145
+
void add(const Value_ *value, const Index_ *index, Index_ number)
Definition sums.hpp:165
+
RunningSparse(Output_ *sum, bool skip_nan, Index_ subtract=0)
Definition sums.hpp:155
+
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &sopt)
Definition sums.hpp:295
+
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &sopt)
Definition sums.hpp:211
+
std::vector< Output_ > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &sopt)
Definition sums.hpp:328
Output_ direct(const Value_ *ptr, Index_ num, bool skip_nan)
Definition sums.hpp:57
Functions to compute statistics from a tatami::Matrix.
Definition counts.hpp:18
void parallelize(Function_ fun, Index_ tasks, int threads)
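A brief usage sketch of the summation helpers, mirroring the tatami_stats::sums::apply() call shown on the index page; the function signatures follow the sums.hpp listing above, while the include path, the Options defaults and the double/int types are assumptions.

    #include <vector>
    #include "tatami_stats/sums.hpp" // include path assumed.

    void sum_examples(const tatami::Matrix<double, int>* mat) {
        tatami_stats::sums::Options sopt;
        sopt.skip_nan = true;   // treat NaNs as missing rather than letting them propagate.
        sopt.num_threads = 4;

        auto row_sums = tatami_stats::sums::by_row(mat, sopt); // std::vector<double> of length nrow().
        auto col_sums = tatami_stats::sums::by_column(mat);    // default single-threaded Options.

        // apply() writes into a caller-allocated buffer instead of returning a vector.
        std::vector<double> out(mat->nrow());
        tatami_stats::sums::apply(/* row = */ true, mat, out.data(), sopt);
    }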
diff --git a/docs/variances_8hpp_source.html b/docs/variances_8hpp_source.html
index 46da228..4388f5a 100644
--- a/docs/variances_8hpp_source.html
+++ b/docs/variances_8hpp_source.html
@@ -224,264 +224,272 @@
210 ::tatami_stats::internal::nanable_ifelse<Value_>(
211 my_skip_nan,
212 [&]() {
-
213 for (Index_ i = 0; i < my_num; ++i, ++ptr) {
-
214 auto val = *ptr;
-
215 if (!std::isnan(val)) {
-
216 internal::add_welford(my_mean[i], my_variance[i], val, ++(my_ok_count[i]));
-
217 }
-
218 }
-
219 },
-
220 [&]() {
-
221 ++my_count;
-
222 for (Index_ i = 0; i < my_num; ++i, ++ptr) {
-
223 internal::add_welford(my_mean[i], my_variance[i], *ptr, my_count);
-
224 }
-
225 }
-
226 );
-
227 }
+
213 SUBPAR_VECTORIZABLE
+
214 for (Index_ i = 0; i < my_num; ++i, ++ptr) {
+
215 auto val = *ptr;
+
216 if (!std::isnan(val)) {
+
217 internal::add_welford(my_mean[i], my_variance[i], val, ++(my_ok_count[i]));
+
218 }
+
219 }
+
220 },
+
221 [&]() {
+
222 ++my_count;
+
223 SUBPAR_VECTORIZABLE
+
224 for (Index_ i = 0; i < my_num; ++i, ++ptr) {
+
225 internal::add_welford(my_mean[i], my_variance[i], *ptr, my_count);
+
226 }
+
227 }
+
228 );
+
229 }
-
228
-
-
232 void finish() {
-
233 ::tatami_stats::internal::nanable_ifelse<Value_>(
-
234 my_skip_nan,
-
235 [&]() {
-
236 for (Index_ i = 0; i < my_num; ++i) {
-
237 auto ct = my_ok_count[i];
-
238 if (ct < 2) {
-
239 my_variance[i] = std::numeric_limits<Output_>::quiet_NaN();
-
240 if (ct == 0) {
-
241 my_mean[i] = std::numeric_limits<Output_>::quiet_NaN();
-
242 }
-
243 } else {
-
244 my_variance[i] /= ct - 1;
-
245 }
-
246 }
-
247 },
-
248 [&]() {
-
249 if (my_count < 2) {
-
250 std::fill_n(my_variance, my_num, std::numeric_limits<Output_>::quiet_NaN());
-
251 if (my_count == 0) {
-
252 std::fill_n(my_mean, my_num, std::numeric_limits<Output_>::quiet_NaN());
-
253 }
-
254 } else {
-
255 for (Index_ i = 0; i < my_num; ++i) {
-
256 my_variance[i] /= my_count - 1;
-
257 }
-
258 }
-
259 }
-
260 );
-
261 }
+
230
+
+
234 void finish() {
+
235 ::tatami_stats::internal::nanable_ifelse<Value_>(
+
236 my_skip_nan,
+
237 [&]() {
+
238 SUBPAR_VECTORIZABLE
+
239 for (Index_ i = 0; i < my_num; ++i) {
+
240 auto ct = my_ok_count[i];
+
241 if (ct < 2) {
+
242 my_variance[i] = std::numeric_limits<Output_>::quiet_NaN();
+
243 if (ct == 0) {
+
244 my_mean[i] = std::numeric_limits<Output_>::quiet_NaN();
+
245 }
+
246 } else {
+
247 my_variance[i] /= ct - 1;
+
248 }
+
249 }
+
250 },
+
251 [&]() {
+
252 if (my_count < 2) {
+
253 std::fill_n(my_variance, my_num, std::numeric_limits<Output_>::quiet_NaN());
+
254 if (my_count == 0) {
+
255 std::fill_n(my_mean, my_num, std::numeric_limits<Output_>::quiet_NaN());
+
256 }
+
257 } else {
+
258 SUBPAR_VECTORIZABLE
+
259 for (Index_ i = 0; i < my_num; ++i) {
+
260 my_variance[i] /= my_count - 1;
+
261 }
+
262 }
+
263 }
+
264 );
+
265 }
-
262
-
263private:
-
264 Index_ my_num;
-
265 Output_* my_mean;
-
266 Output_* my_variance;
-
267 bool my_skip_nan;
-
268 Index_ my_count = 0;
-
269 typename std::conditional<std::numeric_limits<Value_>::has_quiet_NaN, std::vector<Index_>, internal::MockVector<Index_> >::type my_ok_count;
-
270};
+
266
+
267private:
+
268 Index_ my_num;
+
269 Output_* my_mean;
+
270 Output_* my_variance;
+
271 bool my_skip_nan;
+
272 Index_ my_count = 0;
+
273 typename std::conditional<std::numeric_limits<Value_>::has_quiet_NaN, std::vector<Index_>, internal::MockVector<Index_> >::type my_ok_count;
+
274};
-
271
-
282template<typename Output_, typename Value_, typename Index_>
-
- -
284public:
-
-
296 RunningSparse(Index_ num, Output_* mean, Output_* variance, bool skip_nan, Index_ subtract = 0) :
-
297 my_num(num), my_mean(mean), my_variance(variance), my_nonzero(num), my_skip_nan(skip_nan), my_subtract(subtract), my_nan(skip_nan ? num : 0) {}
+
275
+
286template<typename Output_, typename Value_, typename Index_>
+
+ +
288public:
+
+
300 RunningSparse(Index_ num, Output_* mean, Output_* variance, bool skip_nan, Index_ subtract = 0) :
+
301 my_num(num), my_mean(mean), my_variance(variance), my_nonzero(num), my_skip_nan(skip_nan), my_subtract(subtract), my_nan(skip_nan ? num : 0) {}
-
298
-
-
305 void add(const Value_* value, const Index_* index, Index_ number) {
-
306 ++my_count;
-
307
-
308 ::tatami_stats::internal::nanable_ifelse<Value_>(
-
309 my_skip_nan,
-
310 [&]() {
-
311 for (Index_ i = 0; i < number; ++i) {
-
312 auto val = value[i];
-
313 auto ri = index[i] - my_subtract;
-
314 if (std::isnan(val)) {
-
315 ++my_nan[ri];
-
316 } else {
-
317 internal::add_welford(my_mean[ri], my_variance[ri], val, ++(my_nonzero[ri]));
-
318 }
-
319 }
-
320 },
-
321 [&]() {
-
322 for (Index_ i = 0; i < number; ++i) {
-
323 auto ri = index[i] - my_subtract;
-
324 internal::add_welford(my_mean[ri], my_variance[ri], value[i], ++(my_nonzero[ri]));
-
325 }
-
326 }
-
327 );
-
328 }
+
302
+
+
309 void add(const Value_* value, const Index_* index, Index_ number) {
+
310 ++my_count;
+
311
+
312 ::tatami_stats::internal::nanable_ifelse<Value_>(
+
313 my_skip_nan,
+
314 [&]() {
+
315 SUBPAR_VECTORIZABLE
+
316 for (Index_ i = 0; i < number; ++i) {
+
317 auto val = value[i];
+
318 auto ri = index[i] - my_subtract;
+
319 if (std::isnan(val)) {
+
320 ++my_nan[ri];
+
321 } else {
+
322 internal::add_welford(my_mean[ri], my_variance[ri], val, ++(my_nonzero[ri]));
+
323 }
+
324 }
+
325 },
+
326 [&]() {
+
327 SUBPAR_VECTORIZABLE
+
328 for (Index_ i = 0; i < number; ++i) {
+
329 auto ri = index[i] - my_subtract;
+
330 internal::add_welford(my_mean[ri], my_variance[ri], value[i], ++(my_nonzero[ri]));
+
331 }
+
332 }
+
333 );
+
334 }
-
329
-
-
333 void finish() {
-
334 ::tatami_stats::internal::nanable_ifelse<Value_>(
-
335 my_skip_nan,
-
336 [&]() {
-
337 for (Index_ i = 0; i < my_num; ++i) {
-
338 auto& curM = my_mean[i];
-
339 auto& curV = my_variance[i];
-
340 Index_ ct = my_count - my_nan[i];
-
341
-
342 if (ct < 2) {
-
343 curV = std::numeric_limits<Output_>::quiet_NaN();
-
344 if (ct == 0) {
-
345 curM = std::numeric_limits<Output_>::quiet_NaN();
-
346 }
-
347 } else {
-
348 internal::add_welford_zeros(curM, curV, my_nonzero[i], ct);
-
349 curV /= ct - 1;
-
350 }
-
351 }
-
352 },
-
353 [&]() {
-
354 if (my_count < 2) {
-
355 std::fill_n(my_variance, my_num, std::numeric_limits<Output_>::quiet_NaN());
-
356 if (my_count == 0) {
-
357 std::fill_n(my_mean, my_num, std::numeric_limits<Output_>::quiet_NaN());
-
358 }
-
359 } else {
-
360 for (Index_ i = 0; i < my_num; ++i) {
-
361 auto& var = my_variance[i];
-
362 internal::add_welford_zeros(my_mean[i], var, my_nonzero[i], my_count);
-
363 var /= my_count - 1;
-
364 }
-
365 }
-
366 }
-
367 );
-
368 }
+
335
+
+
339 void finish() {
+
340 ::tatami_stats::internal::nanable_ifelse<Value_>(
+
341 my_skip_nan,
+
342 [&]() {
+
343 SUBPAR_VECTORIZABLE
+
344 for (Index_ i = 0; i < my_num; ++i) {
+
345 auto& curM = my_mean[i];
+
346 auto& curV = my_variance[i];
+
347 Index_ ct = my_count - my_nan[i];
+
348
+
349 if (ct < 2) {
+
350 curV = std::numeric_limits<Output_>::quiet_NaN();
+
351 if (ct == 0) {
+
352 curM = std::numeric_limits<Output_>::quiet_NaN();
+
353 }
+
354 } else {
+
355 internal::add_welford_zeros(curM, curV, my_nonzero[i], ct);
+
356 curV /= ct - 1;
+
357 }
+
358 }
+
359 },
+
360 [&]() {
+
361 if (my_count < 2) {
+
362 std::fill_n(my_variance, my_num, std::numeric_limits<Output_>::quiet_NaN());
+
363 if (my_count == 0) {
+
364 std::fill_n(my_mean, my_num, std::numeric_limits<Output_>::quiet_NaN());
+
365 }
+
366 } else {
+
367 SUBPAR_VECTORIZABLE
+
368 for (Index_ i = 0; i < my_num; ++i) {
+
369 auto& var = my_variance[i];
+
370 internal::add_welford_zeros(my_mean[i], var, my_nonzero[i], my_count);
+
371 var /= my_count - 1;
+
372 }
+
373 }
+
374 }
+
375 );
+
376 }
-
369
-
370private:
-
371 Index_ my_num;
-
372 Output_* my_mean;
-
373 Output_* my_variance;
-
374 std::vector<Index_> my_nonzero;
-
375 bool my_skip_nan;
-
376 Index_ my_subtract;
-
377 Index_ my_count = 0;
-
378 typename std::conditional<std::numeric_limits<Value_>::has_quiet_NaN, std::vector<Index_>, internal::MockVector<Index_> >::type my_nan;
-
379};
+
377
+
378private:
+
379 Index_ my_num;
+
380 Output_* my_mean;
+
381 Output_* my_variance;
+
382 std::vector<Index_> my_nonzero;
+
383 bool my_skip_nan;
+
384 Index_ my_subtract;
+
385 Index_ my_count = 0;
+
386 typename std::conditional<std::numeric_limits<Value_>::has_quiet_NaN, std::vector<Index_>, internal::MockVector<Index_> >::type my_nan;
+
387};
-
380
-
397template<typename Value_, typename Index_, typename Output_>
-
-
398void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* output, const Options& vopt) {
-
399 auto dim = (row ? p->nrow() : p->ncol());
-
400 auto otherdim = (row ? p->ncol() : p->nrow());
-
401 const bool direct = p->prefer_rows() == row;
-
402
-
403 if (p->sparse()) {
-
404 if (direct) {
-
405 tatami::Options opt;
-
406 opt.sparse_extract_index = false;
-
407 tatami::parallelize([&](int, Index_ s, Index_ l) {
-
408 auto ext = tatami::consecutive_extractor<true>(p, row, s, l);
-
409 std::vector<Value_> vbuffer(otherdim);
-
410 for (Index_ x = 0; x < l; ++x) {
-
411 auto out = ext->fetch(vbuffer.data(), NULL);
-
412 output[x + s] = variances::direct<Output_>(out.value, out.number, otherdim, vopt.skip_nan).second;
-
413 }
-
414 }, dim, vopt.num_threads);
-
415
-
416 } else {
-
417 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
-
418 auto ext = tatami::consecutive_extractor<true>(p, !row, static_cast<Index_>(0), otherdim, s, l);
-
419 std::vector<Value_> vbuffer(l);
-
420 std::vector<Index_> ibuffer(l);
-
421
-
422 std::vector<Output_> running_means(l);
-
423 LocalOutputBuffer<Output_> local_output(thread, s, l, output);
-
424 variances::RunningSparse<Output_, Value_, Index_> runner(l, running_means.data(), local_output.data(), vopt.skip_nan, s);
-
425
-
426 for (Index_ x = 0; x < otherdim; ++x) {
-
427 auto out = ext->fetch(vbuffer.data(), ibuffer.data());
-
428 runner.add(out.value, out.index, out.number);
-
429 }
-
430 runner.finish();
-
431
-
432 local_output.transfer();
-
433 }, dim, vopt.num_threads);
-
434 }
-
435
-
436 } else {
-
437 if (direct) {
-
438 tatami::parallelize([&](int, Index_ s, Index_ l) {
-
439 auto ext = tatami::consecutive_extractor<false>(p, row, s, l);
-
440 std::vector<Value_> buffer(otherdim);
-
441 for (Index_ x = 0; x < l; ++x) {
-
442 auto out = ext->fetch(buffer.data());
-
443 output[x + s] = variances::direct<Output_>(out, otherdim, vopt.skip_nan).second;
-
444 }
-
445 }, dim, vopt.num_threads);
-
446
-
447 } else {
-
448 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
-
449 auto ext = tatami::consecutive_extractor<false>(p, !row, static_cast<Index_>(0), otherdim, s, l);
-
450 std::vector<Value_> buffer(l);
-
451
-
452 std::vector<Output_> running_means(l);
-
453 LocalOutputBuffer<Output_> local_output(thread, s, l, output);
-
454 variances::RunningDense<Output_, Value_, Index_> runner(l, running_means.data(), local_output.data(), vopt.skip_nan);
-
455
-
456 for (Index_ x = 0; x < otherdim; ++x) {
-
457 runner.add(ext->fetch(buffer.data()));
-
458 }
-
459 runner.finish();
-
460
-
461 local_output.transfer();
-
462 }, dim, vopt.num_threads);
-
463 }
-
464 }
-
465}
+
388
+
405template<typename Value_, typename Index_, typename Output_>
+
+
406void apply(bool row, const tatami::Matrix<Value_, Index_>* p, Output_* output, const Options& vopt) {
+
407 auto dim = (row ? p->nrow() : p->ncol());
+
408 auto otherdim = (row ? p->ncol() : p->nrow());
+
409 const bool direct = p->prefer_rows() == row;
+
410
+
411 if (p->sparse()) {
+
412 if (direct) {
+
413 tatami::Options opt;
+
414 opt.sparse_extract_index = false;
+
415 tatami::parallelize([&](int, Index_ s, Index_ l) {
+
416 auto ext = tatami::consecutive_extractor<true>(p, row, s, l);
+
417 std::vector<Value_> vbuffer(otherdim);
+
418 for (Index_ x = 0; x < l; ++x) {
+
419 auto out = ext->fetch(vbuffer.data(), NULL);
+
420 output[x + s] = variances::direct<Output_>(out.value, out.number, otherdim, vopt.skip_nan).second;
+
421 }
+
422 }, dim, vopt.num_threads);
+
423
+
424 } else {
+
425 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
+
426 auto ext = tatami::consecutive_extractor<true>(p, !row, static_cast<Index_>(0), otherdim, s, l);
+
427 std::vector<Value_> vbuffer(l);
+
428 std::vector<Index_> ibuffer(l);
+
429
+
430 std::vector<Output_> running_means(l);
+
431 LocalOutputBuffer<Output_> local_output(thread, s, l, output);
+
432 variances::RunningSparse<Output_, Value_, Index_> runner(l, running_means.data(), local_output.data(), vopt.skip_nan, s);
+
433
+
434 for (Index_ x = 0; x < otherdim; ++x) {
+
435 auto out = ext->fetch(vbuffer.data(), ibuffer.data());
+
436 runner.add(out.value, out.index, out.number);
+
437 }
+
438 runner.finish();
+
439
+
440 local_output.transfer();
+
441 }, dim, vopt.num_threads);
+
442 }
+
443
+
444 } else {
+
445 if (direct) {
+
446 tatami::parallelize([&](int, Index_ s, Index_ l) {
+
447 auto ext = tatami::consecutive_extractor<false>(p, row, s, l);
+
448 std::vector<Value_> buffer(otherdim);
+
449 for (Index_ x = 0; x < l; ++x) {
+
450 auto out = ext->fetch(buffer.data());
+
451 output[x + s] = variances::direct<Output_>(out, otherdim, vopt.skip_nan).second;
+
452 }
+
453 }, dim, vopt.num_threads);
+
454
+
455 } else {
+
456 tatami::parallelize([&](int thread, Index_ s, Index_ l) {
+
457 auto ext = tatami::consecutive_extractor<false>(p, !row, static_cast<Index_>(0), otherdim, s, l);
+
458 std::vector<Value_> buffer(l);
+
459
+
460 std::vector<Output_> running_means(l);
+
461 LocalOutputBuffer<Output_> local_output(thread, s, l, output);
+
462 variances::RunningDense<Output_, Value_, Index_> runner(l, running_means.data(), local_output.data(), vopt.skip_nan);
+
463
+
464 for (Index_ x = 0; x < otherdim; ++x) {
+
465 runner.add(ext->fetch(buffer.data()));
+
466 }
+
467 runner.finish();
+
468
+
469 local_output.transfer();
+
470 }, dim, vopt.num_threads);
+
471 }
+
472 }
+
473}
-
466
-
479template<typename Output_ = double, typename Value_, typename Index_>
-
-
480std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p, const Options& vopt) {
-
481 std::vector<Output_> output(p->ncol());
-
482 apply(false, p, output.data(), vopt);
-
483 return output;
-
484}
+
474
+
487template<typename Output_ = double, typename Value_, typename Index_>
+
+
488std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p, const Options& vopt) {
+
489 std::vector<Output_> output(p->ncol());
+
490 apply(false, p, output.data(), vopt);
+
491 return output;
+
492}
-
485
-
497template<typename Output_ = double, typename Value_, typename Index_>
-
-
498std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p) {
-
499 Options vopt;
-
500 return by_column(p, vopt);
-
501}
+
493
+
505template<typename Output_ = double, typename Value_, typename Index_>
+
+
506std::vector<Output_> by_column(const tatami::Matrix<Value_, Index_>* p) {
+
507 Options vopt;
+
508 return by_column(p, vopt);
+
509}
-
502
-
515template<typename Output_ = double, typename Value_, typename Index_>
-
-
516std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p, const Options& vopt) {
-
517 std::vector<Output_> output(p->nrow());
-
518 apply(true, p, output.data(), vopt);
-
519 return output;
-
520}
+
510
+
523template<typename Output_ = double, typename Value_, typename Index_>
+
+
524std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p, const Options& vopt) {
+
525 std::vector<Output_> output(p->nrow());
+
526 apply(true, p, output.data(), vopt);
+
527 return output;
+
528}
-
521
-
533template<typename Output_ = double, typename Value_, typename Index_>
-
-
534std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p) {
-
535 Options vopt;
-
536 return by_row(p, vopt);
-
537}
+
529
+
541template<typename Output_ = double, typename Value_, typename Index_>
+
+
542std::vector<Output_> by_row(const tatami::Matrix<Value_, Index_>* p) {
+
543 Options vopt;
+
544 return by_row(p, vopt);
+
545}
-
538
-
539}
-
540
-
541}
-
542
-
543#endif
+
546
+
547}
+
548
+
549}
+
550
+
551#endif
virtual Index_ ncol() const=0
virtual Index_ nrow() const=0
@@ -492,16 +500,16 @@
Output_ * data()
Definition utils.hpp:111
Running variances from dense data.
Definition variances.hpp:192
void add(const Value_ *ptr)
Definition variances.hpp:209
-
void finish()
Definition variances.hpp:232
+
void finish()
Definition variances.hpp:234
RunningDense(Index_ num, Output_ *mean, Output_ *variance, bool skip_nan)
Definition variances.hpp:202
-
Running variances from sparse data.
Definition variances.hpp:283
-
void finish()
Definition variances.hpp:333
-
RunningSparse(Index_ num, Output_ *mean, Output_ *variance, bool skip_nan, Index_ subtract=0)
Definition variances.hpp:296
-
void add(const Value_ *value, const Index_ *index, Index_ number)
Definition variances.hpp:305
-
std::vector< Output_ > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &vopt)
Definition variances.hpp:516
+
Running variances from sparse data.
Definition variances.hpp:287
+
void finish()
Definition variances.hpp:339
+
RunningSparse(Index_ num, Output_ *mean, Output_ *variance, bool skip_nan, Index_ subtract=0)
Definition variances.hpp:300
+
void add(const Value_ *value, const Index_ *index, Index_ number)
Definition variances.hpp:309
+
std::vector< Output_ > by_row(const tatami::Matrix< Value_, Index_ > *p, const Options &vopt)
Definition variances.hpp:524
std::pair< Output_, Output_ > direct(const Value_ *value, Index_ num_nonzero, Index_ num_all, bool skip_nan)
Definition variances.hpp:95
-
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &vopt)
Definition variances.hpp:398
-
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &vopt)
Definition variances.hpp:480
+
void apply(bool row, const tatami::Matrix< Value_, Index_ > *p, Output_ *output, const Options &vopt)
Definition variances.hpp:406
+
std::vector< Output_ > by_column(const tatami::Matrix< Value_, Index_ > *p, const Options &vopt)
Definition variances.hpp:488
Functions to compute statistics from a tatami::Matrix.
Definition counts.hpp:18
void parallelize(Function_ fun, Index_ tasks, int threads)
auto consecutive_extractor(const Matrix< Value_, Index_ > *mat, bool row, Index_ iter_start, Index_ iter_length, Args_ &&... args)
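Finally, a hedged sketch of the variance helpers and of driving the running Welford accumulator by hand. The by_column() call matches the index page, and the RunningDense<Output_, Value_, Index_> constructor, add() and finish() follow the variances.hpp listing above; the include path, the double/int types and the choice to iterate over full rows here are assumptions.

    #include <vector>
    #include "tatami_stats/variances.hpp" // include path assumed.

    void variance_examples(const tatami::Matrix<double, int>* mat) {
        // High-level interface: one sample variance per column.
        tatami_stats::variances::Options vopt;
        vopt.skip_nan = true;
        vopt.num_threads = 4;
        auto col_vars = tatami_stats::variances::by_column(mat, vopt);

        // Manual use of the running Welford helper: each add() contributes one
        // observation per column, and finish() divides by (count - 1).
        int nrow = mat->nrow(), ncol = mat->ncol();
        std::vector<double> means(ncol), vars(ncol), buffer(ncol);
        tatami_stats::variances::RunningDense<double, double, int> runner(ncol, means.data(), vars.data(), /* skip_nan = */ false);

        auto ext = tatami::consecutive_extractor<false>(mat, /* row = */ true, 0, nrow);
        for (int r = 0; r < nrow; ++r) {
            runner.add(ext->fetch(buffer.data()));
        }
        runner.finish();
    }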