I am planning to migrate to Fargate for autoscaling purposes. I already have task definitions for both my staging and production environments, which are currently live. There are two ways I thought I could migrate to Fargate: either create a new Task Definition that uses Fargate with pretty much the same configuration my EC2 instances are using, OR export the relevant Task Definition to JSON, replace the values of requiresCompatibilities
and compatibilities
from EC2
to FARGATE
and call it a day, and deploy. I don't know if that would work. Here's a sample Task Definition that I have modified to use FARGATE in the aforementioned keys; some data, such as environment variables, has been redacted.
{
"ipcMode": null,
"executionRoleArn": null,
"containerDefinitions": [
{
"dnsSearchDomains": null,
"environmentFiles": null,
"logConfiguration": {
"logDriver": "awslogs",
"secretOptions": null,
"options": {
"awslogs-group": "/ecs/MY-PROJECT-staging",
"awslogs-region": "my-region",
"awslogs-stream-prefix": "ecs"
}
},
"entryPoint": null,
"portMappings": [
{
"hostPort": 8080,
"protocol": "tcp",
"containerPort": 8020
}
],
"command": null,
"linuxParameters": null,
"cpu": 0,
"resourceRequirements": null,
"ulimits": null,
"dnsServers": null,
"mountPoints": [],
"workingDirectory": null,
"secrets": null,
"dockerSecurityOptions": null,
"memory": 300,
"memoryReservation": null,
"volumesFrom": [],
"stopTimeout": null,
"image": "$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG",
"startTimeout": null,
"firelensConfiguration": null,
"dependsOn": null,
"disableNetworking": null,
"interactive": null,
"healthCheck": null,
"essential": true,
"links": null,
"hostname": null,
"extraHosts": null,
"pseudoTerminal": null,
"user": null,
"readonlyRootFilesystem": null,
"dockerLabels": {
"env": "staging"
},
"systemControls": null,
"privileged": null,
"name": "MY-PROJECT-web-stage"
}
],
"placementConstraints": [],
"memory": null,
"taskRoleArn": null,
"compatibilities": [
"FARGATE"
],
"taskDefinitionArn": "task-definition/MY-PROJECT-staging:10",
"family": "MY-PROJECT-staging",
"requiresAttributes": [
{
"targetId": null,
"targetType": null,
"value": null,
"name": "com.amazonaws.ecs.capability.logging-driver.awslogs"
},
{
"targetId": null,
"targetType": null,
"value": null,
"name": "com.amazonaws.ecs.capability.ecr-auth"
},
{
"targetId": null,
"targetType": null,
"value": null,
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.19"
},
{
"targetId": null,
"targetType": null,
"value": null,
"name": "com.amazonaws.ecs.capability.docker-remote-api.1.18"
}
],
"pidMode": null,
"requiresCompatibilities": [
"FARGATE"
],
"networkMode": null,
"cpu": null,
"revision": 10,
"status": "ACTIVE",
"inferenceAccelerators": null,
"proxyConfiguration": null,
"volumes": []
}
One more concern: Fargate requires networkMode to be awsvpc,
but it needs to be bridge for my use case in order to map the host port (8080) to the container port (8020).