For those who’ve worked with asyncio
and aws_lambda_powertools
, I’m working on a simple proof of concept Lambda to demonstrate how async processing can save time. However, I’m seeing no performance improvement between this async Lambda and its non-async counterpart. I assumed this was due to a problem in how I wrote the async code, but when the functions are run locally, the async function shows a huge performance improvement.
Is there something about Lambda that I’m not familiar with that would cause this odd behavior?
Here’s my async code:
app.py
import os
import json
from time import perf_counter
from botocore.config import Config
from aws_lambda_powertools import Logger
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.utilities.data_classes.sqs_event import (
SQSEvent, SQSRecord
)
from aws_lambda_powertools.utilities.data_classes import event_source
from aws_lambda_powertools.utilities.batch import (
AsyncBatchProcessor, EventType, async_process_partial_response
)
from adapter.ddb_client import DynamoDBClient
# Shared botocore configuration pinning the region for every AWS client below.
aws_config = Config(
    region_name="us-east-1"
)

# Module-level singletons: created once per Lambda execution environment and
# reused across warm invocations.
ddb_client = DynamoDBClient(os.environ["TABLE_NAME"], aws_config)
# NOTE(review): removed `sqs_client = SQSCLient(os.environ["QUEUE_NAME"], aws_config)`.
# `SQSCLient` is not imported or defined anywhere in this file, so that line
# raised NameError at import time, and `sqs_client` was never used afterwards.
batch_processor = AsyncBatchProcessor(EventType.SQS)
logger = Logger()
async def record_handler(record: SQSRecord):
    """Decode one SQS record's JSON body and persist it to DynamoDB."""
    payload = json.loads(record.body)
    await ddb_client.write_record_async(payload)
@logger.inject_lambda_context
@event_source(data_class=SQSEvent)
def handler(event: SQSEvent, context: LambdaContext):
    """Lambda entry point: process the SQS batch asynchronously.

    Delegates each record to ``record_handler`` via the async batch
    processor, then attaches the wall-clock duration (in ms, rounded)
    to the partial-batch response under the "duration" key.
    """
    started = perf_counter()
    logger.info("Starting standard process...")

    resp = async_process_partial_response(
        event=event,
        record_handler=record_handler,
        processor=batch_processor,
        context=context
    )

    duration_in_ms = (perf_counter() - started) * 1000
    resp["duration"] = round(duration_in_ms)
    logger.info(f"Duration: {duration_in_ms}ms")
    return resp
adapter/ddb_client.py
import asyncio
import boto3
from botocore.config import Config
class DynamoDBClient:
    """Thin wrapper around a boto3 DynamoDB Table with an async write helper."""

    def __init__(self, table: str, config: Config):
        # Build the resource first, then bind the Table handle, so each step
        # is visible in a traceback if configuration is wrong.
        resource = boto3.resource("dynamodb", config=config)
        self.table = resource.Table(table)
        self.table_name = table

    def write_record(self, record: dict):
        """Synchronously put *record* into the table; returns the boto3 response."""
        return self.table.put_item(Item=record)

    async def write_record_async(self, record: dict):
        """Run the blocking ``put_item`` call in a worker thread.

        NOTE(review): ``run_in_executor(None, ...)`` uses the event loop's
        default ThreadPoolExecutor, whose worker count scales with
        ``os.cpu_count()`` — on a low-memory Lambda this may cap the
        effective write concurrency; confirm against the target memory size.
        """
        loop = asyncio.get_running_loop()
        pending = loop.run_in_executor(None, self.write_record, record)
        return await pending
I’ve tried increasing the Lambda memory allocation, which doesn’t help. I am wondering what is different between my local environment and AWS Lambda that would cause this async code to perform well locally but not when run in AWS.
Jack Curran is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.