I want my AWS Glue ETL job (Python script) to generate files in an S3 bucket
whose names match the pattern dostrp*.csv.gz, but I could not find a way to
specify such a file-name pattern in the script.
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
# Resolve the job parameters passed by the Glue runtime:
#   --JOB_NAME           standard Glue job name (used for bookmarks/commit)
#   --target_BucketName  destination S3 path for the CSV output
args = getResolvedOptions(sys.argv, ['target_BucketName', 'JOB_NAME'])

# Standard Glue boilerplate: Spark context, Glue wrapper, and job handle.
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)

outputbucketname = args['target_BucketName']

# Read the source table from the Glue Data Catalog into a DynamicFrame.
# NOTE(review): database/table are placeholders ("xxxx") — fill in real names.
source_dyf = glueContext.create_dynamic_frame.from_catalog(
    database="xxxx",
    table_name="xxxx",
    transformation_ctx="AWSGlueDataCatalog_node188777777",
)

# Write the frame to S3 as pipe-delimited, gzip-compressed CSV.
# BUG FIX: the original script wrote an undefined variable
# (AWSGlueDataCatalog_node8877777777) here, which raises NameError at runtime;
# it must reference the DynamicFrame created above.
#
# Re: the file-name pattern question — Spark/Glue assigns part-file names
# (e.g. run-...-part-r-00000.csv.gz) and write_dynamic_frame offers no option
# to set a custom prefix such as dostrp*.csv.gz. To get that naming, rename
# the objects after the write (e.g. with boto3 copy_object + delete_object),
# or coalesce to one partition and rename the single output file.
glueContext.write_dynamic_frame.from_options(
    frame=source_dyf,
    connection_type="s3",
    format="csv",
    format_options={"separator": "|"},
    connection_options={
        "path": outputbucketname,
        "compression": "gzip",
        "partitionKeys": [],
    },
    transformation_ctx="AmazonS3_node5566677777",
)

# Commit so Glue job bookmarks record this run's progress.
job.commit()