import os
import shutil
import tempfile
import urllib.request

from avocado.utils import process  # assumption: `process` is the avocado test-framework runner
import install_common              # project-local helper module


def set_xml(sourcehost, sourcepath, xmlstr, hddriver, diskpath, ks, nfs_server, logger):
    """Patch a kickstart file on an NFS share and point the domain XML at it."""
    boot_driver = install_common.get_boot_driver(hddriver, logger)
    xmlstr = install_common.set_disk_xml(hddriver, xmlstr, diskpath,
                                         logger, sourcehost, sourcepath)
    ks_name = os.path.basename(ks)
    tmppath = tempfile.mkdtemp()
    cmd = "mount -t nfs %s:/srv/www/html/test-api-ks/tmp-ks %s" % (nfs_server, tmppath)
    ret = process.run(cmd, shell=True, ignore_status=True)
    if ret.exit_status:
        logger.error("mount failed: %s" % cmd)
        return 1
    if os.path.exists("%s/%s" % (tmppath, ks_name)):
        os.remove("%s/%s" % (tmppath, ks_name))
    urllib.request.urlretrieve(ks, "%s/%s" % (tmppath, ks_name))
    # Rewrite the kickstart so --boot-drive= names the detected boot device.
    old_ks_fp = open('%s/%s' % (tmppath, ks_name), "r+")
    new_ks_fp = open("%s/test_api_iso_ks.cfg" % tmppath, "w")
    old_ks_file = old_ks_fp.read()
    old_ks_file = old_ks_file.replace("--boot-drive=", "--boot-drive=%s" % boot_driver)
    new_ks_fp.write(old_ks_file)
    new_ks_fp.close()
    old_ks_fp.close()
    shutil.move("%s/test_api_iso_ks.cfg" % tmppath, "%s/%s" % (tmppath, ks_name))
    cmd = "umount %s" % tmppath
    ret = process.run(cmd, shell=True, ignore_status=True)
    if ret.exit_status:
        logger.error("umount failed: %s" % cmd)
        return 1
    xmlstr = xmlstr.replace('KS', 'http://%s/test-api-ks/tmp-ks/%s' % (nfs_server, ks_name))
    shutil.rmtree(tmppath)
    return xmlstr
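# The function above leaves tmppath mounted if urlretrieve or the file edit
# raises. A minimal sketch (assuming the same avocado-style process.run) that
# guarantees cleanup with try/finally; `with_nfs_mount` is a hypothetical
# helper, not part of the original:
import shutil
import tempfile

from avocado.utils import process


def with_nfs_mount(nfs_server, export, work):
    """Mount an NFS export, run work(mountpoint), and always clean up."""
    tmppath = tempfile.mkdtemp()
    ret = process.run("mount -t nfs %s:%s %s" % (nfs_server, export, tmppath),
                      shell=True, ignore_status=True)
    if ret.exit_status:
        shutil.rmtree(tmppath)
        raise RuntimeError("mount failed: %s" % tmppath)
    try:
        return work(tmppath)
    finally:
        process.run("umount %s" % tmppath, shell=True, ignore_status=True)
        shutil.rmtree(tmppath)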
import os
import sys
import tarfile
import urllib.request


def maybe_download_and_extract():
    """Download and extract the model tar file."""
    dest_directory = FLAGS.model_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
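# The function above relies on module-level FLAGS and DATA_URL. A minimal,
# hypothetical setup that makes it runnable standalone (the flag name mirrors
# the snippet; the URL is a placeholder, not from the original):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='/tmp/model')
FLAGS, _ = parser.parse_known_args()
DATA_URL = 'http://example.com/model.tgz'  # placeholder tarball URL

# maybe_download_and_extract()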
import os
import urllib.request

import cv2


def StoreInitialImages():
    # Fetch the ImageNet URL list for synset n04082710 and download each image.
    PositiveImagesLink = 'http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n04082710'
    PositiveImageURLs = urllib.request.urlopen(PositiveImagesLink).read().decode(encoding='UTF-8', errors='strict')
    path = '/Users/Gale/Documents/ObjectDetector/return-cat_in_a_box/Negative/'
    if not os.path.exists(path):
        os.makedirs(path)
    count = 1045
    for i in PositiveImageURLs.split('\n'):
        try:
            print(i)
            path2 = path + str(count) + '.jpg'
            urllib.request.urlretrieve(i, path2)
            # Convert each download to a 100x100 grayscale training image.
            image = cv2.imread(path2)
            grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            resizedImage = cv2.resize(grayImage, (100, 100))
            cv2.imwrite(path2, resizedImage)
        except Exception as e:
            print(str(e))
        count = count + 1
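# cv2.imread returns None for corrupt downloads or HTML error pages saved as
# .jpg, and the broad except above silently hides that. A hedged sketch of the
# per-URL body that removes undecodable files (fetch_gray_100x100 is a
# hypothetical name; the cv2/urllib calls match the snippet above):
import os
import urllib.request

import cv2


def fetch_gray_100x100(url, dest_path):
    urllib.request.urlretrieve(url, dest_path)
    image = cv2.imread(dest_path)
    if image is None:  # the download was not a decodable image
        os.remove(dest_path)
        return False
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(dest_path, cv2.resize(gray, (100, 100)))
    return True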
import os
import sys
import tarfile
import urllib.request


def maybe_download_and_extract():
    """Download and extract the tarball from Alex's website."""
    # DEST_DIRECTORY and DATA_URL are module-level constants.
    if not os.path.exists(DEST_DIRECTORY):
        os.makedirs(DEST_DIRECTORY)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(DEST_DIRECTORY, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' %
                (filename,
                 float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    extracted_dir_path = os.path.join(DEST_DIRECTORY, 'cifar-10-batches-bin')
    if not os.path.exists(extracted_dir_path):
        tarfile.open(filepath, 'r:gz').extractall(DEST_DIRECTORY)
import os
import sys
import tarfile
import urllib.request


def maybe_download_and_extract():
    """Download the model tarball from DATA_URL into FLAGS.model_dir and unpack it."""
    dest_directory = FLAGS.model_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, reporthook=_progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
import os
import sys
import tarfile
import urllib.request


def download_and_uncompress_tarball(tarball_url, dataset_dir):
    """Downloads the `tarball_url` and uncompresses it locally.

    Args:
      tarball_url: The URL of a tarball file.
      dataset_dir: The directory where the temporary files are stored.
    """
    filename = tarball_url.split('/')[-1]
    filepath = os.path.join(dataset_dir, filename)

    def _progress(count, block_size, total_size):
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (
            filename, float(count * block_size) / float(total_size) * 100.0))
        sys.stdout.flush()

    filepath, _ = urllib.request.urlretrieve(tarball_url, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    tarfile.open(filepath, 'r:gz').extractall(dataset_dir)
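# Example invocation of the helper above (URL and directory are hypothetical):
import os

dataset_dir = '/tmp/dataset'
if not os.path.exists(dataset_dir):
    os.makedirs(dataset_dir)
download_and_uncompress_tarball('http://example.com/dataset.tar.gz', dataset_dir)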
import os
import sys
import urllib.request


def download(url, dir, fname=None):
    """Download `url` into `dir`; mkdir_p and logger come from the module."""
    mkdir_p(dir)
    if fname is None:
        fname = url.split('/')[-1]
    fpath = os.path.join(dir, fname)

    def _progress(count, block_size, total_size):
        sys.stdout.write('\r>> Downloading %s %.1f%%' %
                         (fname,
                          min(float(count * block_size) / total_size,
                              1.0) * 100.0))
        sys.stdout.flush()

    try:
        fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=_progress)
        statinfo = os.stat(fpath)
        size = statinfo.st_size
    except Exception:
        logger.error("Failed to download {}".format(url))
        raise
    assert size > 0, "Downloaded an empty file!"
    sys.stdout.write('\n')
    print('Successfully downloaded ' + fname + " " + str(size) + ' bytes.')
    return fpath
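# `mkdir_p` and `logger` are helpers from the original module. Stdlib
# stand-ins (an assumption, not the original definitions) that let the
# function above run as-is:
import logging
import os

logger = logging.getLogger(__name__)


def mkdir_p(d):
    os.makedirs(d, exist_ok=True)

# fpath = download('http://example.com/data.bin', '/tmp/downloads')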
import sys
import time
import urllib.request


def download(url, dst):  # enclosing signature reconstructed (assumption); the snippet began mid-function
    def _reporthook(count, block_size, total_size):
        global start_time
        if count == 0:
            start_time = time.time()
            return
        duration = time.time() - start_time
        progress_size = int(count * block_size)
        # Guard against a zero duration on the very first measured block.
        speed = int(progress_size / (1024 * duration)) if duration else 0
        percent = int(count * block_size * 100 / total_size)
        sys.stdout.write(
            '\r...%d%%, %d MB, %d KB/s, %d seconds passed' %
            (percent, progress_size / (1024 * 1024), speed, duration)
        )
        sys.stdout.flush()

    urllib.request.urlretrieve(url, dst, _reporthook)
    sys.stdout.write('\n')
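# The `global start_time` above is a wart of this widely copied reporthook.
# A closure-based variant with the same printout (a sketch; make_reporthook
# is a hypothetical name):
import sys
import time
import urllib.request


def make_reporthook():
    start = time.time()

    def hook(count, block_size, total_size):
        if count == 0:
            return
        duration = max(time.time() - start, 1e-6)
        progress = count * block_size
        speed = int(progress / (1024 * duration))
        percent = min(int(progress * 100 / total_size), 100)
        sys.stdout.write('\r...%d%%, %d MB, %d KB/s, %d seconds passed'
                         % (percent, progress / (1024 * 1024), speed, duration))
        sys.stdout.flush()

    return hook

# urllib.request.urlretrieve(url, dst, make_reporthook())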
import numpy as np
import six
from six.moves.urllib import request


def download_mnist_data():
    # parent, train_images/labels, test_images/labels, num_train/num_test and
    # load_mnist are module-level names in the original script.
    print('Downloading {:s}...'.format(train_images))
    request.urlretrieve('{:s}/{:s}'.format(parent, train_images), train_images)
    print('Done')
    print('Downloading {:s}...'.format(train_labels))
    request.urlretrieve('{:s}/{:s}'.format(parent, train_labels), train_labels)
    print('Done')
    print('Downloading {:s}...'.format(test_images))
    request.urlretrieve('{:s}/{:s}'.format(parent, test_images), test_images)
    print('Done')
    print('Downloading {:s}...'.format(test_labels))
    request.urlretrieve('{:s}/{:s}'.format(parent, test_labels), test_labels)
    print('Done')
    print('Converting training data...')
    data_train, target_train = load_mnist(train_images, train_labels,
                                          num_train)
    print('Done')
    print('Converting test data...')
    data_test, target_test = load_mnist(test_images, test_labels, num_test)
    mnist = {'data': np.append(data_train, data_test, axis=0),
             'target': np.append(target_train, target_test, axis=0)}
    print('Done')
    print('Saving output...')
    with open('mnist.pkl', 'wb') as output:
        six.moves.cPickle.dump(mnist, output, -1)
    print('Done')
    print('Conversion completed')
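# The snippet above assumes module-level names like the following. The values
# are the classic MNIST filenames and host, given here as an assumption about
# the surrounding module; load_mnist must parse the IDX files:
parent = 'http://yann.lecun.com/exdb/mnist'
train_images = 'train-images-idx3-ubyte.gz'
train_labels = 'train-labels-idx1-ubyte.gz'
test_images = 't10k-images-idx3-ubyte.gz'
test_labels = 't10k-labels-idx1-ubyte.gz'
num_train, num_test = 60000, 10000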
import json
import os

import pandas as pd
from six.moves.urllib import request


# Enclosing signature reconstructed from the free variables below (assumption);
# data_root, ensure_directory, last_modified_time, EXCHANGE_CONFIG_URL, log and
# ExchangeJSONDecoder are helpers from the surrounding module.
def get_exchange_config(exchange_name, environ=None, expiry='2H'):
    root = data_root(environ)
    path = os.path.join(root, 'exchanges')
    folder = os.path.join(path, exchange_name)
    ensure_directory(folder)
    filename = os.path.join(folder, 'config.json')
    url = EXCHANGE_CONFIG_URL.format(exchange=exchange_name)
    if os.path.isfile(filename):
        # If the file exists, only update periodically to avoid
        # unnecessary calls
        now = pd.Timestamp.utcnow()
        limit = pd.Timedelta(expiry)
        if pd.Timedelta(now - last_modified_time(filename)) > limit:
            try:
                request.urlretrieve(url=url, filename=filename)
            except Exception as e:
                log.warn(
                    'unable to update config {} => {}: {}'.format(
                        url, filename, e
                    )
                )
    else:
        request.urlretrieve(url=url, filename=filename)
    try:
        with open(filename) as data_file:
            data = json.load(data_file, cls=ExchangeJSONDecoder)
            return data
    except Exception as e:
        log.warn(
            # The original snippet was truncated here; message is an assumption.
            'unable to read config {}: {}'.format(filename, e)
        )
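# The cache-and-refresh pattern above, as a self-contained stdlib sketch:
# hypothetical names, with pandas timestamps replaced by time.time(), and a
# 7200-second default mirroring the '2H' expiry:
import json
import os
import time
from urllib import request


def fetch_cached_json(url, filename, max_age_seconds=7200):
    """Download url to filename unless a fresh-enough cached copy exists."""
    stale = (not os.path.isfile(filename)
             or time.time() - os.path.getmtime(filename) > max_age_seconds)
    if stale:
        try:
            request.urlretrieve(url=url, filename=filename)
        except Exception:
            if not os.path.isfile(filename):
                raise  # no cached copy to fall back on
    with open(filename) as f:
        return json.load(f)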