from celery import task  # old-style shared-task decorator (Celery 3.x era, matching these snippets)
from django.core.cache import cache, caches  # default cache plus the named 'db' cache

@task()  # save_in_caches {{{1
def save_in_caches(key, value, timeout=None):
    # Write the value to both the default cache and the database-backed cache.
    cache_db = caches['db']
    if timeout:
        cache.set(key, value, timeout)
        cache_db.set(key, value, timeout)
    else:
        cache.set(key, value)
        cache_db.set(key, value)
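A task defined this way is queued for a worker rather than called directly. A minimal sketch of dispatching it, assuming a configured Celery broker; the key and value are illustrative placeholders:

profile_data = {'name': 'Ada'}  # illustrative value

# .delay() is shorthand for .apply_async() with positional/keyword args.
save_in_caches.delay('user:42:profile', profile_data, timeout=300)

# apply_async() exposes more options, e.g. run 60 seconds from now.
save_in_caches.apply_async(args=['user:42:profile', profile_data], countdown=60)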
@task(name="count_interest")
def count():
users = User.objects.filter(account__balance__isnull=False)
if users.exists():
for user in users:
balance = user.balance
# calculates users interest
amount = (balance * 10) / 100
Interest.objects.create(user=user, amount=amount)
# adds users interest to balance.
user.account.balance += amount
user.account.save()
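An interest calculation like this is usually run on a schedule rather than on demand. A sketch using Celery beat's crontab schedule in the old-style CELERYBEAT_SCHEDULE setting (matching the @task-era API these snippets use); the entry name is illustrative:

from celery.schedules import crontab

CELERYBEAT_SCHEDULE = {
    'count-interest-daily': {
        'task': 'count_interest',              # matches the name= given to the task
        'schedule': crontab(hour=0, minute=0), # run at midnight every day
    },
}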
@task(name='prep_instance_for_snapshot', ignore_result=False)
def prep_instance_for_snapshot(identity_id, instance_id, **celery_task_args):
    identity = Identity.objects.get(id=identity_id)
    try:
        celery_logger.debug(
            "prep_instance_for_snapshot task started at %s." % timezone.now()
        )
        # NOTE: FIXME if the assumption that the 'linux username'
        # is the 'created_by' AtmosphereUser changes.
        username = identity.created_by.username
        driver = get_esh_driver(identity)
        instance = driver.get_instance(instance_id)
        if instance.extra.get('status', '') != 'active':
            celery_logger.info("prep_instance_for_snapshot skipped")
            return
        playbook_results = deploy_prepare_snapshot(
            instance.ip, username, instance_id
        )
        # ... snippet truncated in the source; the matching except clause is not shown.
@task(name='Transform.transform_job')
def transform_job(user_json, media_in_json, media_out_json, profile_json, callback_json):
    try:
        # Avoid 'referenced before assignment'
        callback = None
        media_out = None
        encoder_out = ''
        request = current_task.request
        # Let the task begin!
        start_date = datetime_now()
        start_time = time.time()
        print('%s Transform job started' % request.id)
        # Read the current configuration to translate file URIs to local paths.
        config = TransformConfig.read('../local_config.pkl')
        # ... snippet truncated in the source; the matching except clause is not shown.
@task
def db_gauge_task(slug, current_value, **kwargs):
    gauge, created = Gauge.objects.get_or_create(slug=slug, defaults={
        'name': slug,
        'current_value': current_value,
    })
    if not created:
        gauge.current_value = current_value
        gauge.save()
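To update many gauges concurrently, task signatures can be fanned out with a group; a sketch assuming an illustrative dict of slug-to-value readings:

from celery import group

readings = {'cpu-load': 0.72, 'queue-depth': 14}  # illustrative data
# .s() builds a signature; the group dispatches one task per gauge in parallel.
group(db_gauge_task.s(slug, value) for slug, value in readings.items())()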
@task(name="sync_inventory_resources_task")
def sync_inventory_resources_task(inventory_name):
inventory = Inventory.objects.get(name=inventory_name)
inventory.client().update_resources()
return True
@task(name="attach_task")
def attach_task(
driverCls,
provider,
identity,
instance_id,
volume_id,
device_choice=None,
*args,
**kwargs
):
celery_logger.debug("attach_task started at %s." % timezone.now())
driver = get_driver(driverCls, provider, identity)
from service.volume import attach_volume
attach_volume(driver, instance_id, volume_id, device_choice=device_choice)
attempts = 0
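Volume operations like this are often sequenced with other tasks. A sketch of chaining with immutable signatures (.si(), which ignores the previous task's return value); mount_volume_task is a hypothetical follow-up, and the arguments are placeholders:

from celery import chain

# Run attach_task, then the follow-up, strictly in order.
chain(
    attach_task.si(driverCls, provider, identity, instance_id, volume_id),
    mount_volume_task.si(instance_id, volume_id),  # hypothetical follow-up task
).apply_async()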
@task(default_retry_delay=600,
      max_retries=3,
      timeout=10,
      rate_limit='10/s')
def whois_ripencc(ip_pk, *args, **kwargs):
    try:
        _whois(ip_pk)
    except Exception as exception:
        print(exception)
        raise whois_ripencc.retry(args=[ip_pk],
                                  exc=exception,
                                  kwargs=kwargs)
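The same retry pattern is usually written with a bound task in later Celery versions, so the task can call self.retry() instead of referring to itself by name; a sketch of the equivalent idiom:

@task(bind=True, max_retries=3, default_retry_delay=600, rate_limit='10/s')
def whois_ripencc_v2(self, ip_pk):  # hypothetical rewrite of the task above
    try:
        _whois(ip_pk)
    except Exception as exc:
        # self.retry() re-queues the task and itself raises Retry.
        raise self.retry(exc=exc)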
@task
def fetch_package_metadata(pk):
    package = Package.objects.get(pk=pk)
    package.fetch_metadata()
    print("Updated %s's metadata" % package.slug)
@task(name='machine_request_error')
def machine_request_error(task_request, *args, **kwargs):
    # Args format: (exception, ?, subtask_args...)
    exception = args[0]
    machine_request_id = args[2]
    task_uuid = task_request.id
    celery_logger.info("machine_request_id=%s" % machine_request_id)
    celery_logger.info("task_uuid=%s" % (task_uuid, ))
    celery_logger.info("exception=%s" % (exception, ))
    celery_logger.info("task_kwargs=%s" % kwargs)
    machine_request = MachineRequest.objects.get(id=machine_request_id)
    result = app.AsyncResult(task_uuid)
    with allow_join_result():
        result.get(propagate=False)
    err_str = _status_to_error(
        machine_request.old_status, result.result, result.traceback
    )
    # ... snippet truncated in the source.
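This function has the shape of an error callback: it receives details of a failed task rather than being dispatched directly. A sketch of attaching it via link_error when queuing the task it guards; build_machine_task is a placeholder, and the exact arguments Celery passes to an errback vary by version:

# Attach the error handler when dispatching the monitored task.
build_machine_task.apply_async(          # hypothetical task being monitored
    args=[machine_request_id],
    link_error=machine_request_error.s(),
)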