How to use the @pulumi/pulumi.all function in @pulumi/pulumi

To help you get started, we've selected a few pulumi.all examples based on popular ways it is used in public projects.

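At its core, pulumi.all combines several Output values into a single Output of a tuple, so one apply callback can use all of the resolved values together. A minimal sketch before the real-world examples below (the S3 bucket is purely illustrative):

import * as aws from "@pulumi/aws";
import * as pulumi from "@pulumi/pulumi";

const bucket = new aws.s3.Bucket("example");

// pulumi.all waits for every input to resolve, then passes the plain,
// unwrapped values to the apply callback as a tuple.
export const summary = pulumi
    .all([bucket.id, bucket.arn])
    .apply(([id, arn]) => `bucket ${id} has ARN ${arn}`);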

github pulumi/pulumi-cloud: azure/service.ts (view on GitHub)
preEnv: Record<string, pulumi.Input<string>>,
    build: string | cloud.ContainerBuild) {

    const imageName = getBuildImageName(build);
    const registry = getOrCreateGlobalRegistry();

    // This is a container to build; produce a name, either user-specified or auto-computed.
    pulumi.log.debug(`Building container image at '${build}'`, registry);

    const dockerRegistry = pulumi.output({
        registry: registry.loginServer,
        username: registry.adminUsername,
        password: registry.adminPassword,
    });

    const imageOptions = pulumi.all([registry.loginServer, dockerRegistry]).apply(([loginServer, dockerRegistry]) =>
        computeImageFromBuildWorker(preEnv, build, imageName, loginServer + "/" + imageName, dockerRegistry, parent));

    return { imageOptions, registry };
}
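Besides the array form used above, pulumi.all also accepts an object whose values are Inputs, which avoids positional destructuring. A minimal sketch, with the registry shape assumed for illustration:

import * as pulumi from "@pulumi/pulumi";

// Stand-in for a container registry resource (assumed shape).
declare const registry: {
    loginServer: pulumi.Output<string>;
    adminUsername: pulumi.Output<string>;
};

// The object overload resolves every property and hands back a plain object.
const login = pulumi
    .all({ server: registry.loginServer, user: registry.adminUsername })
    .apply(({ server, user }) => `${user}@${server}`);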
github pulumi/examples: gcp-ts-gke-hello-world/index.ts (view on GitHub)
oauthScopes: [
            "https://www.googleapis.com/auth/compute",
            "https://www.googleapis.com/auth/devstorage.read_only",
            "https://www.googleapis.com/auth/logging.write",
            "https://www.googleapis.com/auth/monitoring",
        ],
    },
});

// Export the Cluster name
export const clusterName = cluster.name;

// Manufacture a GKE-style kubeconfig. Note that this is slightly "different"
// because of the way GKE requires gcloud to be in the picture for cluster
// authentication (rather than using the client cert/key directly).
export const kubeconfig = pulumi.
    all([ cluster.name, cluster.endpoint, cluster.masterAuth ]).
    apply(([ name, endpoint, masterAuth ]) => {
        const context = `${gcp.config.project}_${gcp.config.zone}_${name}`;
        return `apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ${masterAuth.clusterCaCertificate}
    server: https://${endpoint}
  name: ${context}
contexts:
- context:
    cluster: ${context}
    user: ${context}
  name: ${context}
current-context: ${context}
kind: Config
preferences: {}
users:
- name: ${context}
  user:
    auth-provider:
      config:
        cmd-args: config config-helper --format=json
        cmd-path: gcloud
        expiry-key: '{.credential.token_expiry}'
        token-key: '{.credential.access_token}'
      name: gcp
`;
    });
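A kubeconfig manufactured this way is usually passed straight to a Kubernetes provider so that later resources deploy into the new cluster; a short sketch assuming the kubeconfig export above:

import * as k8s from "@pulumi/kubernetes";

// Target the freshly created GKE cluster with subsequent k8s resources.
const gkeProvider = new k8s.Provider("gke-k8s", { kubeconfig });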
github pulumi/examples: gcp-ts-gke/cluster.ts (view on GitHub)
minMasterVersion: masterVersion,
    masterAuth: { username, password },
    nodeConfig: {
        machineType: nodeMachineType,
        oauthScopes: [
            "https://www.googleapis.com/auth/compute",
            "https://www.googleapis.com/auth/devstorage.read_only",
            "https://www.googleapis.com/auth/logging.write",
            "https://www.googleapis.com/auth/monitoring",
        ],
    },
});

// Manufacture a GKE-style Kubeconfig. Note that this is slightly "different" because of the way GKE requires
// gcloud to be in the picture for cluster authentication (rather than using the client cert/key directly).
export const k8sConfig = pulumi.
    all([ k8sCluster.name, k8sCluster.endpoint, k8sCluster.masterAuth ]).
    apply(([ name, endpoint, auth ]) => {
        const context = `${gcp.config.project}_${gcp.config.zone}_${name}`;
        return `apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ${auth.clusterCaCertificate}
    server: https://${endpoint}
  name: ${context}
contexts:
- context:
    cluster: ${context}
    user: ${context}
  name: ${context}
current-context: ${context}
kind: Config
preferences: {}
users:
- name: ${context}
  user:
    auth-provider:
      config:
        cmd-args: config config-helper --format=json
        cmd-path: gcloud
        expiry-key: '{.credential.token_expiry}'
        token-key: '{.credential.access_token}'
      name: gcp
`;
    });
github pulumi/examples: kubernetes-ts-staged-rollout-with-prometheus/util.ts (view on GitHub)
export function forwardPrometheusService(
    service: pulumi.Input<k8s.core.v1.Service>,
    deployment: pulumi.Input<k8s.apps.v1.Deployment>,
    opts: PromPortForwardOpts,
): pulumi.Output<() => void> {
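    // During a preview (dry run) there is no live cluster to port-forward
    // to, so hand back a no-op cleanup function instead of spawning kubectl.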
    if (pulumi.runtime.isDryRun()) {
        return pulumi.output(() => undefined);
    }

    // Join the service and deployment, then resolve the service metadata and
    // the deployment's URN; depending on the URN makes the port-forward wait
    // for the deployment, even though only the metadata is used below.
    return pulumi.all([service, deployment])
        .apply(([s, d]) => pulumi.all([s.metadata, d.urn]))
        .apply(([meta]) => {
        return new Promise<() => void>((resolve, reject) => {
            const forwarderHandle = spawn("kubectl", [
                "port-forward",
                `service/${meta.name}`,
                `${opts.localPort}:${opts.targetPort || 80}`,
            ]);

            // NOTE: we need to wrap `forwarderHandle.kill` because of JavaScript's `this`
            // semantics.
            forwarderHandle.stdout.on("data", data => resolve(() => forwarderHandle.kill()));
            forwarderHandle.stderr.on("data", data => reject());
        });
    });
}
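A caller obtains the cleanup function by applying the returned output once forwarding is established; a sketch with assumed arguments:

// service/deployment are the Prometheus resources created elsewhere;
// localPort matches the PromPortForwardOpts read by the function above.
const forwarder = forwardPrometheusService(service, deployment, { localPort: 9090 });

// ...later, once the checks that needed the tunnel are done:
forwarder.apply(cancel => cancel());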
github pulumi/pulumi-cloud: aws/infrastructure/cluster.ts (view on GitHub)
function getInstanceUserData(
    cluster: aws.ecs.Cluster,
    fileSystem: aws.efs.FileSystem | undefined,
    mountPath: string | undefined,
    cloudFormationStackName: pulumi.Output<string>) {

    const fileSystemId = fileSystem ? fileSystem.id : undefined;

    // pulumi.all lifts plain values (including undefined) alongside Outputs
    // into a single Output tuple.
    const all = pulumi.all([fileSystemId, cluster.id, cloudFormationStackName]);
    return all.apply(([fsId, clusterId, stackName]) => {
        let fileSystemRuncmdBlock = "";
        if (fileSystem && mountPath) {
            // This string must be indented exactly as much as the block of commands it's inserted into below!

            // tslint:disable max-line-length
            fileSystemRuncmdBlock = `
                # Create EFS mount path
                mkdir ${mountPath}
                chown ec2-user:ec2-user ${mountPath}
                # Create environment variables
                EFS_FILE_SYSTEM_ID=${fsId}
                DIR_SRC=$AWS_AVAILABILITY_ZONE.$EFS_FILE_SYSTEM_ID.efs.$AWS_REGION.amazonaws.com
                DIR_TGT=${mountPath}
                # Update /etc/fstab with the new NFS mount
                cp -p /etc/fstab /etc/fstab.back-$(date +%F)
github pulumi/actions-pulumify: infra/bucketDirectory.ts (view on GitHub)
{
                        Action: "logs:*",
                        Resource: "arn:aws:logs:*:*:*",
                        Effect: "Allow",
                    },
                ],
            },
        }, { parent });
        const syncFuncPolicyAtt = new aws.iam.RolePolicyAttachment(`${name}-copyfunc-policy-att`, {
            role: syncFuncRole.name,
            policyArn: syncFuncPolicy.arn,
        }, { parent });

        // Return the ARN for the function, but also join with the policy attachment so consumers don't try
        // to use the function before the policy attachment has occurred (this can lead to permissions errors).
        return pulumi.all([ syncFunc.arn, syncFuncPolicyAtt.id ]).apply(([ arn, _ ]) => arn);
    }
github pulumi/pulumi-cloud: aws/service.ts (view on GitHub)
const imageOptions = computeImage(imageName, container, ports, repository);
            const portMappings = (container.ports || []).map(p => ({
                containerPort: p.targetPort || p.port,
                // From https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html:
                // > For task definitions that use the awsvpc network mode, you should only specify the containerPort.
                // > The hostPort can be left blank or it must be the same value as the containerPort.
                //
                // However, if left blank, it will be automatically populated by AWS, potentially leading to dirty
                // diffs even when no changes have been made. Since we are currently always using `awsvpc` mode, we
                // go ahead and populate it with the same value as `containerPort`.
                //
                // See https://github.com/terraform-providers/terraform-provider-aws/issues/3401.
                hostPort: p.targetPort || p.port,
            }));

            // Combine the resolved image options with the remaining container
            // settings so the ContainerDefinition below is built from plain values.
            return pulumi.all([imageOptions, container.command, container.memory,
                               container.memoryReservation, logGroup.id, container.dockerLabels])
                         .apply(([imageOpts, command, memory, memoryReservation, logGroupId, dockerLabels]) => {
                const keyValuePairs: { name: string, value: string }[] = [];
                for (const key of Object.keys(imageOpts.environment)) {
                    keyValuePairs.push({ name: key, value: imageOpts.environment[key] });
                }

                const containerDefinition: aws.ecs.ContainerDefinition = {
                    name: containerName,
                    image: imageOpts.image,
                    command: command,
                    memory: memory,
                    memoryReservation: memoryReservation,
                    portMappings: portMappings,
                    environment: keyValuePairs,
                    mountPoints: (container.volumes || []).map(v => ({
github pulumi/examples: aws-ts-airflow/index.ts (view on GitHub)
const cacheSubnets = new aws.elasticache.SubnetGroup("cachesubnets", {
    subnetIds: vpc.publicSubnetIds,
});

const cacheCluster = new aws.elasticache.Cluster("cachecluster", {
    engine: "redis",

    nodeType: "cache.t2.micro",
    numCacheNodes: 1,

    subnetGroupName: cacheSubnets.id,
    securityGroupIds: securityGroupIds,
});

// Strip the port from the RDS endpoint ("host:port") and pair the result with
// the Redis node address in a single Output.
const hosts = pulumi.all([db.endpoint.apply(e => e.split(":")[0]), cacheCluster.cacheNodes[0].address]);
const environment = hosts.apply(([postgresHost, redisHost]) => [
    { name: "POSTGRES_HOST", value: postgresHost },
    { name: "POSTGRES_PASSWORD", value: dbPassword },
    { name: "REDIS_HOST", value: redisHost },
    { name: "EXECUTOR", value: "Celery" },
]);

const airflowControllerListener = new awsx.elasticloadbalancingv2.ApplicationListener("airflowcontroller", {
    external: true,
    port: 8080,
    protocol: "HTTP",
});

const airflowController = new awsx.ecs.EC2Service("airflowcontroller", {
    cluster,
    desiredCount: 1,
github pulumi/examples: aws-ts-eks/serviceRole.ts (view on GitHub)
Service: [ service ],
                },
            }],
        }));
        const role = new aws.iam.Role(`${name}-role`, {
            description: args.description,
            assumeRolePolicy: assumeRolePolicy,
        }, { parent: this });
        const rolePolicyAttachments = [];
        for (const policy of (args.managedPolicyArns || [])) {
            rolePolicyAttachments.push(new aws.iam.RolePolicyAttachment(`${name}-${sha1hash(policy)}`, {
                policyArn: policy,
                role: role,
            }, { parent: this }));
        }
        // Join the role with its policy attachments so consumers of this.role
        // cannot observe the role before its policies are attached.
        this.role = pulumi.all([role.arn, ...rolePolicyAttachments.map(r => r.id)]).apply(() => role);

        this.registerOutputs({ role: this.role });
    }
}
github pulumi/pulumi-awsx: nodejs/awsx/autoscaling/autoscaling.ts (view on GitHub)
function getCloudFormationTemplate(
    instanceName: string,
    instanceLaunchConfigurationId: pulumi.Output<string>,
    subnetIds: pulumi.Input<string>[],
    targetGroupArns: pulumi.Input<string>[],
    parameters: pulumi.Output): pulumi.Output<string> {

    // The array overload: pulumi.all(subnetIds) turns Input<string>[] into a
    // single Output<string[]>.
    const subnetIdsArray = pulumi.all(subnetIds);
    return pulumi.all([subnetIdsArray, targetGroupArns, instanceLaunchConfigurationId, parameters])
                 .apply(([subnetIdsArray, targetGroupArns, instanceLaunchConfigurationId, parameters]) => {

    const minSize = ifUndefined(parameters.minSize, 2);
    const maxSize = ifUndefined(parameters.maxSize, 100);
    const desiredCapacity = ifUndefined(parameters.desiredCapacity, minSize);
    const cooldown = ifUndefined(parameters.defaultCooldown, 300);
    const healthCheckGracePeriod = ifUndefined(parameters.healthCheckGracePeriod, 120);
    const healthCheckType = ifUndefined(parameters.healthCheckType, "EC2");
    const suspendProcesses = ifUndefined(parameters.suspendedProcesses, ["ScheduledActions"]);

    let suspendProcessesString = "";
    for (let i = 0, n = suspendProcesses.length; i < n; i++) {
        const sp = suspendProcesses[i];
        if (i > 0) {
            suspendProcessesString += "\n";