# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): the next line is a bare name used as an expression statement —
# a no-op at module level. It looks like a truncation artifact of a longer
# assignment (e.g. a "LIST_OF_ALL_LITERAL_TYPES = ... +
# LIST_OF_NESTED_COLLECTION_LITERAL_TYPES" line split by extraction) —
# confirm against the original file.
LIST_OF_NESTED_COLLECTION_LITERAL_TYPES
# One TypedInterface per literal type: input variable 'a' and output
# variable 'b' both typed as t. LIST_OF_ALL_LITERAL_TYPES and the
# `interface` module are defined elsewhere in this file.
LIST_OF_INTERFACES = [
interface.TypedInterface(
{'a': interface.Variable(t, "description 1")},
{'b': interface.Variable(t, "description 2")}
)
for t in LIST_OF_ALL_LITERAL_TYPES
]
# One ResourceEntry per ResourceName, each with a small fixed amount:
# "1" unit for CPU/GPU, "1G" for MEMORY/STORAGE.
LIST_OF_RESOURCE_ENTRIES = [
    task.Resources.ResourceEntry(resource_name, amount)
    for resource_name, amount in (
        (task.Resources.ResourceName.CPU, "1"),
        (task.Resources.ResourceName.GPU, "1"),
        (task.Resources.ResourceName.MEMORY, "1G"),
        (task.Resources.ResourceName.STORAGE, "1G"),
    )
]
# A single candidate entry list; used below as both the requests pool and
# the limits pool.
LIST_OF_RESOURCE_ENTRY_LISTS = [LIST_OF_RESOURCE_ENTRIES]
# Every (request, limit) pairing of the candidate entry lists. The nested
# comprehension visits pairs in the same order as
# itertools.product(LIST_OF_RESOURCE_ENTRY_LISTS, LIST_OF_RESOURCE_ENTRY_LISTS).
LIST_OF_RESOURCES = [
    task.Resources(req_entries, lim_entries)
    for req_entries in LIST_OF_RESOURCE_ENTRY_LISTS
    for lim_entries in LIST_OF_RESOURCE_ENTRY_LISTS
]
LIST_OF_RUNTIME_METADATA = [
task.RuntimeMetadata(task.RuntimeMetadata.RuntimeType.OTHER, "1.0.0", "python"),
_task_models.Resources.ResourceName.GPU,
gpu_request
)
)
if memory_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.MEMORY,
memory_request
)
)
limits = []
if storage_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.STORAGE,
storage_limit
)
)
if cpu_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.CPU,
cpu_limit
)
)
if gpu_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.GPU,
gpu_limit
storage_limit = storage_limit or storage_request
cpu_limit = cpu_limit or cpu_request
gpu_limit = gpu_limit or gpu_request
memory_limit = memory_limit or memory_request
requests = []
if storage_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.STORAGE,
storage_request
)
)
if cpu_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.CPU,
cpu_request
)
)
if gpu_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.GPU,
gpu_request
)
)
if memory_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.MEMORY,
memory_request
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.STORAGE,
storage_request
)
)
if cpu_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.CPU,
cpu_request
)
)
if gpu_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.GPU,
gpu_request
)
)
if memory_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.MEMORY,
memory_request
)
)
limits = []
if storage_limit:
limits.append(
_task_models.Resources.ResourceEntry(
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.CPU,
cpu_limit
)
)
if gpu_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.GPU,
gpu_limit
)
)
if memory_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.MEMORY,
memory_limit
)
)
return (cls or SdkRunnableContainer)(
command=[],
args=[
"pyflyte-execute",
"--task-module",
self.task_module,
"--task-name",
self.task_function_name,
"--inputs",
"{{.input}}",
"--output-prefix",
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.STORAGE,
storage_limit
)
)
if cpu_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.CPU,
cpu_limit
)
)
if gpu_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.GPU,
gpu_limit
)
)
if memory_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.MEMORY,
memory_limit
)
)
return (cls or SdkRunnableContainer)(
command=[],
args=[
"pyflyte-execute",
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.CPU,
cpu_request
)
)
if gpu_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.GPU,
gpu_request
)
)
if memory_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.MEMORY,
memory_request
)
)
limits = []
if storage_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.STORAGE,
storage_limit
)
)
if cpu_limit:
limits.append(
_task_models.Resources.ResourceEntry(
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.STORAGE,
storage_limit
)
)
if cpu_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.CPU,
cpu_limit
)
)
if gpu_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.GPU,
gpu_limit
)
)
if memory_limit:
limits.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.MEMORY,
memory_limit
)
)
if environment is None:
environment = {}
return _task_models.Container(
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.STORAGE,
storage_request
)
)
if cpu_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.CPU,
cpu_request
)
)
if gpu_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.GPU,
gpu_request
)
)
if memory_request:
requests.append(
_task_models.Resources.ResourceEntry(
_task_models.Resources.ResourceName.MEMORY,
memory_request
)
)
limits = []
if storage_limit:
limits.append(
_task_models.Resources.ResourceEntry(