Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of a fit()-style method from a WEASEL-like
# transformer (pyts-style API). The enclosing `def` is outside this view and
# the loop body below has lost its indentation -- confirm structure against
# the original source. The fragment also ends abruptly: the chi2 statistics
# computed at the bottom are never consumed here.
self._window_steps = window_steps
# Reset the per-window-size fitted artifacts on every fit.
self._sfa_list = []
self._vectorizer_list = []
self._relevant_features_list = []
self.vocabulary_ = {}
# One SFA + bag-of-words pipeline per (window_size, window_step) pair.
for (window_size, window_step) in zip(window_sizes, window_steps):
# Number of windows extracted per sample for this (size, step) pair.
n_windows = ((n_timestamps - window_size + window_step)
// window_step)
X_windowed = _windowed_view(
X, n_samples, n_timestamps, window_size, window_step
)
# Flatten to (n_samples * n_windows, window_size) so each window is a row.
X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)
sfa = SymbolicFourierApproximation(
n_coefs=self.word_size, drop_sum=self.drop_sum,
anova=self.anova, norm_mean=self.norm_mean,
norm_std=self.norm_std, n_bins=self.n_bins,
strategy=self.strategy, alphabet=self.alphabet
)
# Each window inherits the label of the sample it came from.
y_repeated = np.repeat(y, n_windows)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
# Join each window's symbols into one word, then regroup words per sample.
X_word = np.asarray([''.join(X_sfa[i])
for i in range(n_samples * n_windows)])
X_word = X_word.reshape(n_samples, n_windows)
# One space-separated "document" of words per sample (bag-of-words input).
X_bow = np.asarray([' '.join(X_word[i]) for i in range(n_samples)])
# Unigrams and bigrams of words, then chi-square scores vs. the labels
# (presumably for relevant-feature selection -- the selection step is not
# visible in this fragment).
vectorizer = CountVectorizer(ngram_range=(1, 2))
X_counts = vectorizer.fit_transform(X_bow)
chi2_statistics, _ = chi2(X_counts, y)
# NOTE(review): this span is a near-duplicate of the fragment above it --
# likely an extraction artifact rather than intentional code. The only
# difference is the `X_features` accumulator initialised before the loop.
self._sfa_list = []
self._vectorizer_list = []
self._relevant_features_list = []
self.vocabulary_ = {}
# Empty sparse matrix with n_samples rows; presumably grown by hstacking the
# selected features of each window size -- the concatenation step is not
# visible in this fragment.
X_features = coo_matrix((n_samples, 0), dtype=np.int64)
for (window_size, window_step) in zip(window_sizes, window_steps):
# Number of windows extracted per sample for this (size, step) pair.
n_windows = ((n_timestamps - window_size + window_step)
// window_step)
X_windowed = _windowed_view(
X, n_samples, n_timestamps, window_size, window_step
)
# Flatten so each extracted window becomes one row.
X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)
sfa = SymbolicFourierApproximation(
n_coefs=self.word_size, drop_sum=self.drop_sum,
anova=self.anova, norm_mean=self.norm_mean,
norm_std=self.norm_std, n_bins=self.n_bins,
strategy=self.strategy, alphabet=self.alphabet
)
# Each window inherits the label of its parent sample.
y_repeated = np.repeat(y, n_windows)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
# Symbols -> words, regrouped per sample.
X_word = np.asarray([''.join(X_sfa[i])
for i in range(n_samples * n_windows)])
X_word = X_word.reshape(n_samples, n_windows)
X_bow = np.asarray([' '.join(X_word[i]) for i in range(n_samples)])
# Count uni/bigrams and score them against y; the fragment ends before the
# scores are used.
vectorizer = CountVectorizer(ngram_range=(1, 2))
X_counts = vectorizer.fit_transform(X_bow)
chi2_statistics, _ = chi2(X_counts, y)
"""
X = check_array(X)
n_samples, n_timestamps = X.shape
if y is not None:
check_classification_targets(y)
window_size, window_step = self._check_params(n_timestamps)
n_windows = (n_timestamps - window_size + window_step) // window_step
X_windowed = _windowed_view(
X, n_samples, n_timestamps, window_size, window_step
)
X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)
sfa = SymbolicFourierApproximation(
n_coefs=self.word_size, drop_sum=self.drop_sum, anova=self.anova,
norm_mean=self.norm_mean, norm_std=self.norm_std,
n_bins=self.n_bins, strategy=self.strategy, alphabet=self.alphabet
)
if y is None:
y_repeated = None
else:
y_repeated = np.repeat(y, n_windows)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
X_word = np.asarray([''.join(X_sfa[i])
for i in range(n_samples * n_windows)])
X_word = X_word.reshape(n_samples, n_windows)
if self.numerosity_reduction:
not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],
"""
X = check_array(X)
n_samples, n_timestamps = X.shape
if y is not None:
check_classification_targets(y)
window_size, window_step = self._check_params(n_timestamps)
n_windows = (n_timestamps - window_size + window_step) // window_step
X_windowed = _windowed_view(
X, n_samples, n_timestamps, window_size, window_step
)
X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)
sfa = SymbolicFourierApproximation(
n_coefs=self.word_size, drop_sum=self.drop_sum, anova=self.anova,
norm_mean=self.norm_mean, norm_std=self.norm_std,
n_bins=self.n_bins, strategy=self.strategy, alphabet=self.alphabet
)
if y is None:
y_repeated = None
else:
y_repeated = np.repeat(y, n_windows)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
X_word = np.asarray([''.join(X_sfa[i])
for i in range(n_samples * n_windows)])
X_word = X_word.reshape(n_samples, n_windows)
if self.numerosity_reduction:
not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],
# NOTE(review): fit()-style fragment whose opening lines (the `def` header
# and the check_array call presumably preceding X.shape) are missing from
# this view. Unlike the earlier fragments, y is required here.
n_samples, n_timestamps = X.shape
check_classification_targets(y)
# Encode labels to integer indices and record the class set on the estimator.
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = le.classes_
n_classes = self.classes_.size
window_size, window_step = self._check_params(n_timestamps)
n_windows = (n_timestamps - window_size + window_step) // window_step
X_windowed = _windowed_view(
X, n_samples, n_timestamps, window_size, window_step
)
# Flatten: one row per extracted window.
X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)
sfa = SymbolicFourierApproximation(
n_coefs=self.word_size, drop_sum=self.drop_sum, anova=self.anova,
norm_mean=self.norm_mean, norm_std=self.norm_std,
n_bins=self.n_bins, strategy=self.strategy, alphabet=self.alphabet
)
# Each window inherits the label of the sample it was cut from.
y_repeated = np.repeat(y, n_windows)
X_sfa = sfa.fit_transform(X_windowed, y_repeated)
# Symbols -> one word per window, regrouped to (n_samples, n_windows).
X_word = np.asarray([''.join(X_sfa[i])
for i in range(n_samples * n_windows)])
X_word = X_word.reshape(n_samples, n_windows)
if self.numerosity_reduction:
# Keep a word only if it differs from its successor; the appended True
# column always keeps each sample's last word.
not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],
np.full(n_samples, True)]
# Bag-of-words document per sample, built from the retained words.
X_bow = np.asarray([' '.join(X_word[i, not_equal[i]])
for i in range(n_samples)])