import sys
import boto3
import json
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark.sql import Row
from datetime import datetime
# Initialize GlueContext, Spark session, and Job
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
args = {
    'JOB_NAME': 'SampleGlueJob'
}
job.init(args['JOB_NAME'], args)
# Define the S3 bucket and folder to save the JSON files
s3_bucket = "<bucket name>"
s3_folder = "<Folder name>"
# Create a dummy PySpark DataFrame
data = [
    Row(id=1, name="Alice", age=29),
    Row(id=2, name="Bob", age=34),
    Row(id=3, name="Charlie", age=25)
]
df = spark.createDataFrame(data)
# Show the DataFrame
print("Dummy DataFrame:")
df.show()
# Function to save each row as a JSON file in S3
def save_row_as_json(row):
    try:
        # Initialize the S3 client inside the function so each executor creates its own client
        s3 = boto3.client('s3')
        row_dict = row.asDict()
        unique_id = row_dict.get("id")  # Use a unique identifier for the file name
        current_date = datetime.now().strftime("%Y%m%d%H%M%S")
        filename = f"{unique_id}_{current_date}.json"
        json_body = json.dumps(row_dict, indent=2)
        json_file_path = f"{s3_folder}/{filename}"
        # Save to S3
        s3.put_object(Bucket=s3_bucket, Key=json_file_path, Body=json_body)
        print(f"Successfully saved row {unique_id} to s3://{s3_bucket}/{json_file_path}")
    except Exception as e:
        print(f"Error saving row {row['id']} to S3: {str(e)}")
# Save each row as a separate JSON file
df.foreach(save_row_as_json)
# Commit the Glue job
job.commit()
This is my Glue job, which converts each row of a PySpark DataFrame into a JSON document in S3. However, it is not working. Please guide me here. I have tried multiple methods, like initialising the S3 client with boto3 inside the function, and converting the PySpark DataFrame to an RDD and then saving each row as a JSON document, but nothing seems to work. Any help will be greatly appreciated.
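For reference, the RDD variant I tried looked roughly like this (same save_row_as_json function as above):

# Convert the DataFrame to an RDD and apply the same save function to each row
df.rdd.foreach(save_row_as_json)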
Your question is very broad and lacks crucial details about the specifics, making it difficult to provide a precise answer. A Glue Python shell job seems like the solution most aligned with your approach. Refer to AWS's documentation for Glue Python shell jobs at https://docs.aws.amazon.com/glue/latest/dg/add-job-python.html
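If you go that route, here is a minimal sketch of a Python shell version (no Spark involved), assuming the same dummy rows and the same bucket/folder placeholders as your script:

import json
from datetime import datetime

import boto3

s3_bucket = "<bucket name>"   # same placeholders as in the original script
s3_folder = "<Folder name>"

# Dummy rows mirroring the DataFrame in the question
rows = [
    {"id": 1, "name": "Alice", "age": 29},
    {"id": 2, "name": "Bob", "age": 34},
    {"id": 3, "name": "Charlie", "age": 25},
]

s3 = boto3.client("s3")

for row in rows:
    # One JSON object per file, keyed by id and a timestamp
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    key = f"{s3_folder}/{row['id']}_{timestamp}.json"
    s3.put_object(Bucket=s3_bucket, Key=key, Body=json.dumps(row, indent=2))
    print(f"Saved s3://{s3_bucket}/{key}")

Since a Python shell job runs on a single node, both boto3 and print behave as they would in an ordinary Python script, which makes it much easier to see in the logs whether the S3 writes are actually happening.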