Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# (continuation of a ``try`` block that starts above this chunk:
# an ImportError from probing libsvm is recorded here)
    config.ExternalDepFailed('libsvm', exc)
# NOTE(review): Python-2-only handler syntax; Python 3 needs
# ``except AttributeError as exc:``
except AttributeError, exc:
    # the probed attribute is missing on old bindings, so the installed
    # libsvm is too old for MDP
    config.ExternalDepFailed('libsvm', 'libsvm version >= 2.91 required')
else:
    if os.getenv('MDP_DISABLE_LIBSVM'):
        # user explicitly opted out via environment variable
        config.ExternalDepFailed('libsvm', 'disabled')
    else:
        config.ExternalDepFound('libsvm', libsvm.libsvm._name)
# joblib
try:
    import joblib
# Fixed: was the Python-2-only ``except ImportError, exc:`` form, which is
# a SyntaxError on Python 3; the ``as`` form works on both.
except ImportError as exc:
    config.ExternalDepFailed('joblib', exc)
else:
    version = joblib.__version__
    if os.getenv('MDP_DISABLE_JOBLIB'):
        # user explicitly opted out via environment variable
        config.ExternalDepFailed('joblib', 'disabled')
    elif _version_too_old(version, (0, 4, 3)):
        config.ExternalDepFailed('joblib',
                                 'version %s is too old' % version)
    else:
        config.ExternalDepFound('joblib', version)
# sklearn
try:
try:
import sklearn
except ImportError:
import scikits.learn as sklearn
version = sklearn.__version__
except ImportError, exc:
target : numpy array of shape (2200,). Shape depends on ``subset``.
Labels associated with each pair of images. The two label values
correspond to a pair showing different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = _check_fetch_lfw(
    data_home=data_home, funneled=funneled,
    download_if_missing=download_if_missing)
logger.debug('Loading %s LFW pairs from %s', subset, lfw_home)

# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
if LooseVersion(joblib.__version__) < LooseVersion('0.12'):
    # Deal with change of API in joblib: 0.12 renamed the ``cachedir``
    # constructor argument to ``location``
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
else:
    m = Memory(location=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)

# select the right metadata file according to the requested subset
label_filenames = {
    'train': 'pairsDevTrain.txt',
    'test': 'pairsDevTest.txt',
    '10_folds': 'pairs.txt',
}
if subset not in label_filenames:
    # sorted() keeps the error message deterministic across runs
    raise ValueError("subset='%s' is invalid: should be one of %r" % (
        subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
print("numpy: %s, %s" % (numpy.__version__, numpy.__file__))
except ImportError:
print("no numpy")
def _report_dependency(modname):
    """Print ``<name>: <version>, <file>`` for *modname*, or ``no <name>``."""
    try:
        module = __import__(modname)
    except ImportError:
        print("no %s" % modname)
    else:
        print("%s: %s, %s" % (modname, module.__version__, module.__file__))


# Probe each optional dependency in turn and report what was found.
for _probe in ("scipy", "pyproj", "joblib", "rasterio", "geopandas"):
    _report_dependency(_probe)
try:
    import matplotlib
    # select the non-interactive Agg backend before anything imports
    # pyplot, so the probe also works on headless machines
    matplotlib.use('Agg')
    print("matplotlib: %s, %s" % (matplotlib.__version__, matplotlib.__file__))
import joblib

# For some commands, use setuptools
# (these sub-commands are provided by setuptools, not plain distutils,
# so setuptools is imported only when one of them was requested)
_SETUPTOOLS_COMMANDS = {
    'develop', 'sdist', 'release', 'bdist', 'bdist_egg', 'bdist_dumb',
    'bdist_rpm', 'bdist_wheel', 'bdist_wininst', 'install_egg_info',
    'egg_info', 'easy_install', 'upload',
}
if _SETUPTOOLS_COMMANDS.intersection(sys.argv):
    import setuptools

extra_setuptools_args = {}
if __name__ == '__main__':
    # Standard packaging metadata; the argument list continues beyond
    # this chunk.
    setup(name='joblib',
          version=joblib.__version__,
          author='Gael Varoquaux',
          author_email='gael.varoquaux@normalesup.org',
          url='https://joblib.readthedocs.io',
          description=("Lightweight pipelining: using Python functions "
                       "as pipeline jobs."),
          # reuse the package docstring as the long description
          long_description=joblib.__doc__,
          license='BSD',
          classifiers=[
              'Development Status :: 5 - Production/Stable',
              'Environment :: Console',
              'Intended Audience :: Developers',
              'Intended Audience :: Science/Research',
              'Intended Audience :: Education',
              'License :: OSI Approved :: BSD License',
              'Operating System :: OS Independent',
              'Programming Language :: Python :: 2.7',
Hard constraint to select the backend. If set to 'sharedmem',
the selected backend will be single-host and thread-based even
if the user asked for a non-thread based backend with
parallel_backend.
See joblib.Parallel documentation for more details
"""
import joblib

# joblib >= 0.12 understands 'prefer'/'require' natively, so the keyword
# arguments can be passed through unchanged.
# NOTE(review): this compares a plain version string against a LooseVersion
# instance; it works through LooseVersion's reflected comparison, but
# ``LooseVersion(joblib.__version__) >= LooseVersion('0.12')`` would be
# the safer spelling — confirm intent.
if joblib.__version__ >= LooseVersion('0.12'):
    return kwargs

# For older joblib only 'prefer' and 'require' can be emulated below.
extra_args = set(kwargs.keys()).difference({'prefer', 'require'})
if extra_args:
    raise NotImplementedError('unhandled arguments %s with joblib %s'
                              % (list(extra_args), joblib.__version__))
args = {}
if 'prefer' in kwargs:
    prefer = kwargs['prefer']
    if prefer not in ['threads', 'processes', None]:
        raise ValueError('prefer=%s is not supported' % prefer)
    # translate the modern soft hint to a legacy backend name
    args['backend'] = {'threads': 'threading',
                       'processes': 'multiprocessing',
                       None: None}[prefer]
if 'require' in kwargs:
    require = kwargs['require']
    if require not in [None, 'sharedmem']:
        raise ValueError('require=%s is not supported' % require)
    if require == 'sharedmem':
        # the hard 'sharedmem' constraint forces a thread-based backend,
        # overriding any 'prefer' mapping above
        args['backend'] = 'threading'
return args
'ClassifierNode', 'ClassifierCumulator',
'get_eta', 'graph', 'helper_funcs', 'hinet', 'nodes',
'numx_description', 'pca', 'sfa', 'utils', 'whitening',
'parallel', 'numx_version',
'extension_method', 'ExtensionNodeMetaclass', 'ExtensionNode',
'get_extensions', 'with_extension',
'activate_extension', 'deactivate_extension', 'activate_extensions',
'deactivate_extensions',
'ClassifierNode',
'config'
]
# Optional joblib dependency: record its presence and version in the
# package configuration and expose the module through ``__all__``.
with config.ExternalDep('joblib') as dep:
    import joblib
    __all__ += ['joblib']
    dep.found(joblib.__version__)
del dep

# ``caching`` builds on joblib, so it is only imported when joblib was found.
if config.has_joblib:
    # NOTE(review): implicit relative import (Python 2 style); Python 3
    # would need ``from . import caching`` — confirm target version.
    import caching
    __all__ += ['caching']
# if the above failed, try to detect a libsvm installed by PyPI,
# which lives in a different namespace
try:
    import libsvm
except ImportError as exc:
    # keep the earlier failure text and append this attempt's error,
    # so the report shows why both probes failed
    libsvm_error += str(exc)
    config.ExternalDepFailed('libsvm', libsvm_error)
else:
    config.ExternalDepFound('libsvm', libsvm.__version__)
# joblib
try:
    import joblib
except ImportError as exc:
    config.ExternalDepFailed('joblib', exc)
else:
    version = joblib.__version__
    if os.getenv('MDP_DISABLE_JOBLIB'):
        # explicit opt-out via environment variable
        config.ExternalDepFailed('joblib', 'disabled')
    elif not _version_too_old(version, (0, 4, 3)):
        config.ExternalDepFound('joblib', version)
    else:
        config.ExternalDepFailed('joblib',
                                 'version %s is too old' % version)
# sklearn
try:
    try:
        import sklearn
    except ImportError:
        # older scikit-learn releases shipped under the
        # ``scikits.learn`` namespace
        import scikits.learn as sklearn
    version = sklearn.__version__
# handler body continues beyond this chunk
except ImportError as exc:
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
"""
lfw_home, data_folder_path = _check_fetch_lfw(
    data_home=data_home, funneled=funneled,
    download_if_missing=download_if_missing)
logger.debug('Loading LFW people faces from %s', lfw_home)

# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
if LooseVersion(joblib.__version__) < LooseVersion('0.12'):
    # Deal with change of API in joblib: 0.12 renamed the ``cachedir``
    # constructor argument to ``location``
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
else:
    m = Memory(location=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)

# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
    data_folder_path, resize=resize,
    min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)

# flatten each face image into a single feature row
X = faces.reshape(len(faces), -1)

# read the reST description that ships next to this module
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'lfw.rst')) as rst_file:
    fdescr = rst_file.read()
Parameters
----------
memory : None, str or object with the joblib.Memory interface
Returns
-------
memory : object with the joblib.Memory interface
Raises
------
ValueError
If ``memory`` is not joblib.Memory-like.
"""
if memory is None or isinstance(memory, str):
    # joblib 0.12 renamed the ``cachedir`` constructor argument to
    # ``location``; pick the keyword matching the installed version.
    # NOTE(review): LooseVersion < str works via LooseVersion's own
    # comparison, but wrapping '0.12' in LooseVersion would be clearer.
    if LooseVersion(joblib.__version__) < '0.12':
        memory = joblib.Memory(cachedir=memory, verbose=0)
    else:
        memory = joblib.Memory(location=memory, verbose=0)
elif not hasattr(memory, 'cache'):
    # duck-typing check: any object exposing ``.cache`` is accepted
    raise ValueError("'memory' should be None, a string or have the same"
                     " interface as joblib.Memory."
                     " Got memory='{}' instead.".format(memory))
return memory
prefer : str in {'processes', 'threads'} or None
Soft hint to choose the default backend if no specific backend
was selected with the parallel_backend context manager.
require : 'sharedmem' or None
Hard constraint to select the backend. If set to 'sharedmem',
the selected backend will be single-host and thread-based even
if the user asked for a non-thread based backend with
parallel_backend.
See joblib.Parallel documentation for more details
"""
import joblib

# joblib >= 0.12 accepts 'prefer'/'require' directly — pass them through.
# NOTE(review): compares a version string against a LooseVersion instance;
# works via reflected comparison but is fragile — confirm intent.
if joblib.__version__ >= LooseVersion('0.12'):
    return kwargs
# older joblib: only 'prefer' and 'require' can be emulated
extra_args = set(kwargs.keys()).difference({'prefer', 'require'})
if extra_args:
    raise NotImplementedError('unhandled arguments %s with joblib %s'
                              % (list(extra_args), joblib.__version__))
args = {}
if 'prefer' in kwargs:
    prefer = kwargs['prefer']
    if prefer not in ['threads', 'processes', None]:
        raise ValueError('prefer=%s is not supported' % prefer)
    # translate the modern soft hint to a legacy backend name
    args['backend'] = {'threads': 'threading',
                       'processes': 'multiprocessing',
                       None: None}[prefer]
# ('require' handling continues beyond this chunk)
if 'require' in kwargs: