I have an EMR cluster to which I am submitting 50 jobs at roughly the same time (about 3 minutes between the first submission and the last). I want all of the jobs to run in parallel, and I would expect them all to take about the same amount of time to complete. Instead, the first 20 jobs take about 2.5 minutes each, while the last 30 take anywhere from 6 to 10 minutes. They are all spark-submit invocations with the following configuration:
spark-submit --master yarn --deploy-mode client --num-executors=4 --executor-cores=5 --executor-memory 8g --driver-memory 8g --conf spark.dynamicAllocation.enabled=true
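To put numbers on what each submission asks YARN for, here is a rough back-of-the-envelope sketch. The per-node YARN memory figures and the 10% executor memory overhead are assumptions based on EMR/Spark defaults, not values read off my cluster, and with dynamic allocation enabled --num-executors=4 is only the starting point per job:

# Rough sketch of the YARN memory each submission asks for, vs. core-fleet capacity.
# Assumptions (please verify against the actual cluster settings):
#   - spark.executor.memoryOverhead defaults to max(384 MB, 10% of executor memory)
#   - --deploy-mode client keeps each driver off the core nodes' YARN memory
#   - dynamic allocation is on, so --num-executors=4 is only the initial request
#   - per-node YARN memory is EMR's default yarn.nodemanager.resource.memory-mb

executor_mem_gb = 8
overhead_gb = max(0.384, 0.10 * executor_mem_gb)   # ~0.8 GB per executor
container_gb = executor_mem_gb + overhead_gb       # ~8.8 GB per executor container

executors_per_job = 4
per_job_gb = executors_per_job * container_gb      # ~35 GB of YARN memory per job

# Approximate EMR defaults for usable YARN memory per node (assumed, not measured):
yarn_gb_per_m5_2xlarge = 24     # m5.2xlarge: 8 vCPU / 32 GiB
yarn_gb_per_m5_24xlarge = 376   # m5.24xlarge: 96 vCPU / 384 GiB

core_nodes = 3
old_capacity_gb = core_nodes * yarn_gb_per_m5_2xlarge    # ~72 GB
new_capacity_gb = core_nodes * yarn_gb_per_m5_24xlarge   # ~1128 GB

print(f"per job:        ~{per_job_gb:.0f} GB")
print(f"old core fleet: ~{old_capacity_gb} GB  (~{old_capacity_gb / per_job_gb:.0f} jobs at once)")
print(f"new core fleet: ~{new_capacity_gb} GB  (~{new_capacity_gb / per_job_gb:.0f} jobs at once)")
print(f"50 jobs:        ~{50 * per_job_gb:.0f} GB before autoscaling catches up")

If those figures are roughly right, the three core nodes can only hold around 30 jobs' worth of executors before the autoscaler has to kick in, which lines up with the first batch of jobs finishing quickly and the rest waiting.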
I believe there is some issue with resource contention and/or resource allocation, based on the effect of the changes I describe below.
Previously I was running 30 jobs with the configuration below, and the last 10 jobs would take over an hour to run. After changing the core node size from m5.2xlarge to m5.24xlarge, the scaling adjustment in the scale-out rule from 2 to 6, and the cooldown from 300 to 120, the last 10 jobs ran in about 9 minutes.

Previous configuration:
"Configurations": [
{
"Classification": "spark-defaults",
"Properties": {
"spark.driver.defaultJavaOptions": "-XX:OnOutOfMemoryError='kill -9 %p' -XX:MaxHeapFreeRatio=70",
"spark.executor.defaultJavaOptions": "-verbose:gc -Xlog:gc*::time -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:OnOutOfMemoryError='kill -9 %p' -XX:MaxHeapFreeRatio=70 -XX:+IgnoreUnrecognizedVMOptions",
"spark.port.maxRetries": "50",
"spark.dynamicAllocation.preallocateExecutors" : "false"
},
"Configurations": []
}
],
"EbsRootVolumeSize":20,
"StepConcurrencyLevel": 50,
"Instances" : {
"InstanceGroups": [
{
"Name": "Master",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r5.24xlarge",
"InstanceCount": 1,
"EbsConfiguration": {
"EbsBlockDeviceConfigs": [
{
"VolumeSpecification": {
"VolumeType": "gp3",
"SizeInGB": 512
},
"VolumesPerInstance": 1
}
]
}
},
{
"Name": "CORE",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": "**m5.2xlarge**",
"InstanceCount": 3,
"EbsConfiguration": {
"EbsBlockDeviceConfigs": [
{
"VolumeSpecification": {
"VolumeType": "gp3",
"SizeInGB": 512
},
"VolumesPerInstance": 1
}
]
},
"AutoScalingPolicy":
{
"Constraints":
{
"MinCapacity": 3,
"MaxCapacity": 90
},
"Rules":
[
{
"Name": "Default-scale-out",
"Description": "Replicates the default scale-out rule in the console for YARN memory.",
"Action":{
"SimpleScalingPolicyConfiguration":{
"AdjustmentType": "CHANGE_IN_CAPACITY",
**"ScalingAdjustment": 1,**
**"CoolDown": 300**
}
},
"Trigger":{
"CloudWatchAlarmDefinition":{
"ComparisonOperator": "LESS_THAN",
"EvaluationPeriods": 1,
"MetricName": "YARNMemoryAvailablePercentage",
"Namespace": "AWS/ElasticMapReduce",
"Period": 60,
"Threshold": 15,
"Statistic": "AVERAGE",
"Unit": "PERCENT",
"Dimensions":[
{
"Key" : "JobFlowId",
"Value" : "${emr.clusterId}"
}
]
}
}
},
{
"Name": "Default-scale-in",
"Description": "Replicates the default scale-in rule in the console for YARN memory.",
"Action":{
"SimpleScalingPolicyConfiguration":{
"AdjustmentType": "CHANGE_IN_CAPACITY",
**"ScalingAdjustment": -1,**
**"CoolDown": 300**
}
},
"Trigger":{
"CloudWatchAlarmDefinition":{
"ComparisonOperator": "GREATER_THAN",
"EvaluationPeriods": 1,
"MetricName": "YARNMemoryAvailablePercentage",
"Namespace": "AWS/ElasticMapReduce",
"Period": 300,
"Threshold": 60,
"Statistic": "AVERAGE",
"Unit": "PERCENT",
"Dimensions":[
{
"Key" : "JobFlowId",
"Value" : "${emr.clusterId}"
}
]
}
}
}
]
}
}
]
}

Latest configuration:
"Configurations": [
{
"Classification": "spark-defaults",
"Properties": {
"spark.driver.defaultJavaOptions": "-XX:OnOutOfMemoryError='kill -9 %p' -XX:MaxHeapFreeRatio=70",
"spark.executor.defaultJavaOptions": "-verbose:gc -Xlog:gc*::time -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:OnOutOfMemoryError='kill -9 %p' -XX:MaxHeapFreeRatio=70 -XX:+IgnoreUnrecognizedVMOptions",
"spark.port.maxRetries": "50",
"spark.dynamicAllocation.preallocateExecutors" : "false"
},
"Configurations": []
}
],
"EbsRootVolumeSize":20,
"StepConcurrencyLevel": 50,
"Instances" : {
"InstanceGroups": [
{
"Name": "Master",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r5.24xlarge",
"InstanceCount": 1,
"EbsConfiguration": {
"EbsBlockDeviceConfigs": [
{
"VolumeSpecification": {
"VolumeType": "gp3",
"SizeInGB": 512
},
"VolumesPerInstance": 1
}
]
}
},
{
"Name": "CORE",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
**"InstanceType": "m5.24xlarge",**
"InstanceCount": 3,
"EbsConfiguration": {
"EbsBlockDeviceConfigs": [
{
"VolumeSpecification": {
"VolumeType": "gp3",
"SizeInGB": 512
},
"VolumesPerInstance": 1
}
]
},
"AutoScalingPolicy":
{
"Constraints":
{
"MinCapacity": 3,
**"MaxCapacity": 30**
},
"Rules":
[
{
"Name": "Default-scale-out",
"Description": "Replicates the default scale-out rule in the console for YARN memory.",
"Action":{
"SimpleScalingPolicyConfiguration":{
"AdjustmentType": "CHANGE_IN_CAPACITY",
**"ScalingAdjustment": 6,**
**"CoolDown": 120**
}
},
"Trigger":{
"CloudWatchAlarmDefinition":{
"ComparisonOperator": "LESS_THAN",
"EvaluationPeriods": 1,
"MetricName": "YARNMemoryAvailablePercentage",
"Namespace": "AWS/ElasticMapReduce",
"Period": 300,
"Threshold": 15,
"Statistic": "AVERAGE",
"Unit": "PERCENT",
"Dimensions":[
{
"Key" : "JobFlowId",
"Value" : "${emr.clusterId}"
}
]
}
}
},
{
"Name": "Default-scale-in",
"Description": "Replicates the default scale-in rule in the console for YARN memory.",
"Action":{
"SimpleScalingPolicyConfiguration":{
"AdjustmentType": "CHANGE_IN_CAPACITY",
"ScalingAdjustment": -3,
"CoolDown": 120
}
},
"Trigger":{
"CloudWatchAlarmDefinition":{
"ComparisonOperator": "GREATER_THAN",
"EvaluationPeriods": 1,
"MetricName": "YARNMemoryAvailablePercentage",
"Namespace": "AWS/ElasticMapReduce",
"Period": 300,
"Threshold": 85,
"Statistic": "AVERAGE",
"Unit": "PERCENT",
"Dimensions":[
{
"Key" : "JobFlowId",
"Value" : "${emr.clusterId}"
}
]
}
}
}
]
}
}
]
}

Although this is an improvement, the fact that the last jobs take longer than the first submitted jobs indicates that the jobs are not all running in parallel, which is what I am aiming for.
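One way I can check whether the later submissions are actually queuing rather than running is to count applications in the ACCEPTED state while the first wave runs. A minimal sketch against the standard YARN ResourceManager REST API (MASTER_DNS is a placeholder, and I am assuming the ResourceManager is on its default port 8088):

import json
import urllib.request

MASTER_DNS = "ip-10-0-0-1.ec2.internal"  # placeholder -- substitute the master node's DNS
url = f"http://{MASTER_DNS}:8088/ws/v1/cluster/apps?states=ACCEPTED,RUNNING"

with urllib.request.urlopen(url) as resp:
    payload = json.load(resp)

# The ResourceManager returns {"apps": null} when nothing matches, so guard against that.
apps = (payload.get("apps") or {}).get("app", [])
running = [a for a in apps if a["state"] == "RUNNING"]
queued = [a for a in apps if a["state"] == "ACCEPTED"]
print(f"running: {len(running)}, queued (ACCEPTED): {len(queued)}")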
I am also attaching the run times for convenience (screenshots attached: first jobs, middle jobs, last jobs).
Please let me know why this may be occurring, or any ideas you might have. Thank you in advance!