
Commit

Merge branch 'main' into unbalanced_movie
HeikoSchuett committed Aug 31, 2023
2 parents 8db0b92 + c1e48fb commit 1e837f7
Showing 2 changed files with 14 additions and 29 deletions.
17 changes: 1 addition & 16 deletions src/rsatoolbox/data/dataset.py
@@ -180,7 +180,7 @@ def sort_by(self, by):
"""
desc = self.obs_descriptors[by]
- order = np.argsort(desc)
+ order = np.argsort(desc, kind='stable')
self.measurements = self.measurements[order]
self.obs_descriptors = subset_descriptor(self.obs_descriptors, order)

@@ -685,21 +685,6 @@ def subset_time(self, by, t_from, t_to):
time_descriptors=time_descriptors)
return dataset

- def sort_by(self, by):
-     """ sorts the dataset by a given observation descriptor
-     Args:
-         by(String): the descriptor by which the dataset shall be sorted
-     Returns:
-         ---
-     """
-     desc = self.obs_descriptors[by]
-     order = np.argsort(desc)
-     self.measurements = self.measurements[order]
-     self.obs_descriptors = subset_descriptor(self.obs_descriptors, order)

def convert_to_dataset(self, by):
""" converts to Dataset long format.
time dimension is absorbed into observation dimension
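
The dataset.py change does two things: it drops the duplicated `sort_by` definition and makes the remaining one sort with `kind='stable'`, so observations whose descriptor values tie keep their original relative order. The snippet below is a minimal sketch of that behaviour; the `desc` and `measurements` arrays are invented for illustration, not taken from the toolbox.

```python
import numpy as np

# Illustrative descriptor values with ties; not from the rsatoolbox code base.
desc = np.array(['b', 'a', 'b', 'a'])
measurements = np.arange(4)

# kind='stable' guarantees that tied entries keep their original relative
# order (the two 'a' observations at indices 1 and 3 stay as 1, 3);
# the default sort kind makes no such promise.
order = np.argsort(desc, kind='stable')
print(order)                # [1 3 0 2]
print(measurements[order])  # [1 3 0 2]
```
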
26 changes: 13 additions & 13 deletions src/rsatoolbox/util/inference_util.py
@@ -205,7 +205,7 @@ def all_tests(
p_pairwise = t_tests(evaluations, diff_var, dof=dof)
p_zero = t_test_0(evaluations, model_var, dof=dof)
p_noise = t_test_nc(evaluations, noise_ceil_var[:, 0],
- np.mean(noise_ceil[0]), dof)
+ np.nanmean(noise_ceil[0]), dof)
elif test_type == 'bootstrap':
if len(noise_ceil.shape) > 1:
noise_lower_bs = noise_ceil[0]
@@ -217,7 +217,7 @@ def all_tests(
diffs = noise_lower_bs - evaluations
p_noise = ((diffs <= 0).sum(axis=0) + 1) / evaluations.shape[0]
elif test_type == 'ranksum':
- noise_c = np.mean(noise_ceil[0])
+ noise_c = np.nanmean(noise_ceil[0])
p_pairwise = ranksum_pair_test(evaluations)
p_zero = ranksum_value_test(evaluations, 0)
p_noise = ranksum_value_test(evaluations, noise_c)
@@ -313,7 +313,7 @@ def nc_tests(evaluations, noise_ceil, test_type='t-test',
"""
if test_type == 't-test':
p_noise = t_test_nc(evaluations, noise_ceil_var[:, 0],
- np.mean(noise_ceil[0]), dof)
+ np.nanmean(noise_ceil[0]), dof)
elif test_type == 'bootstrap':
if len(noise_ceil.shape) > 1:
noise_lower_bs = noise_ceil[0]
@@ -323,7 +323,7 @@ def nc_tests(evaluations, noise_ceil, test_type='t-test',
diffs = noise_lower_bs - evaluations
p_noise = ((diffs <= 0).sum(axis=0) + 1) / evaluations.shape[0]
elif test_type == 'ranksum':
- noise_c = np.mean(noise_ceil[0])
+ noise_c = np.nanmean(noise_ceil[0])
p_noise = ranksum_value_test(evaluations, noise_c)
else:
raise ValueError('test_type not recognized.\n'
@@ -405,7 +405,7 @@ def bootstrap_pair_tests(evaluations):
"""
proportions = np.zeros((evaluations.shape[1], evaluations.shape[1]))
while len(evaluations.shape) > 2:
- evaluations = np.mean(evaluations, axis=-1)
+ evaluations = np.nanmean(evaluations, axis=-1)
for i_model in range(evaluations.shape[1] - 1):
for j_model in range(i_model + 1, evaluations.shape[1]):
proportions[i_model, j_model] = np.sum(
@@ -439,9 +439,9 @@ def t_tests(evaluations, variances, dof=1):
if variances is None:
raise ValueError('No variance estimates provided for t_test!')
n_model = evaluations.shape[1]
- evaluations = np.mean(evaluations, 0)
+ evaluations = np.nanmean(evaluations, 0)
while evaluations.ndim > 1:
- evaluations = np.mean(evaluations, axis=-1)
+ evaluations = np.nanmean(evaluations, axis=-1)
C = pairwise_contrast(np.arange(n_model))
diffs = C @ evaluations
t = diffs / np.sqrt(np.maximum(variances, np.finfo(float).eps))
@@ -468,9 +468,9 @@ def t_test_0(evaluations, variances, dof=1):
"""
if variances is None:
raise ValueError('No variance estimates provided for t_test!')
- evaluations = np.mean(evaluations, 0)
+ evaluations = np.nanmean(evaluations, 0)
while evaluations.ndim > 1:
- evaluations = np.mean(evaluations, axis=-1)
+ evaluations = np.nanmean(evaluations, axis=-1)
t = evaluations / np.sqrt(np.maximum(variances, np.finfo(float).eps))
p = 1 - stats.t.cdf(t, dof)
return p
@@ -501,9 +501,9 @@ def t_test_nc(evaluations, variances, noise_ceil, dof=1):
"""
if variances is None:
raise ValueError('No variance estimates provided for t_test!')
- evaluations = np.mean(evaluations, 0)
+ evaluations = np.nanmean(evaluations, 0)
while evaluations.ndim > 1:
- evaluations = np.mean(evaluations, axis=-1)
+ evaluations = np.nanmean(evaluations, axis=-1)
p = np.empty(len(evaluations))
for i, eval_i in enumerate(evaluations):
t = (eval_i - noise_ceil) / np.sqrt(
@@ -649,9 +649,9 @@ def get_errorbars(model_var, evaluations, dof, error_bars='sem',
(1, n_models)),
evaluations),
axis=0)
- perf = np.mean(evaluations, 0)
+ perf = np.nanmean(evaluations, 0)
while perf.ndim > 1:
- perf = np.mean(perf, -1)
+ perf = np.nanmean(perf, -1)
errorbar_low = -(np.quantile(framed_evals, prop_cut, axis=0)
- perf)
errorbar_high = (np.quantile(framed_evals, 1 - prop_cut,
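
The inference_util.py hunks all make the same substitution: `np.mean` becomes `np.nanmean` when averaging evaluations and noise ceilings, so a NaN in a single bootstrap sample or cross-validation fold no longer turns the whole summary statistic into NaN. A minimal sketch of the difference, using a made-up evaluations array:

```python
import numpy as np

# Toy (n_samples x n_models) evaluations; the NaN stands in for a sample
# that could not be evaluated. Values are invented for illustration only.
evaluations = np.array([[0.30, 0.50],
                        [0.35, np.nan],
                        [0.40, 0.55]])

print(np.mean(evaluations, axis=0))     # [0.35    nan]  NaN poisons the mean
print(np.nanmean(evaluations, axis=0))  # [0.35  0.525]  NaN entries are skipped
```
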
