From b5fc7b87ddf5f1c9d8e3f22049ccb4c4e49f69d0 Mon Sep 17 00:00:00 2001 From: John Lwin Date: Wed, 16 Jul 2025 01:29:23 -0700 Subject: [PATCH 01/16] Python: Add S3 Batch scenario --- .../s3/scenarios/batch/s3_batch.py | 601 ++++++++++++++++++ 1 file changed, 601 insertions(+) create mode 100644 python/example_code/s3/scenarios/batch/s3_batch.py diff --git a/python/example_code/s3/scenarios/batch/s3_batch.py b/python/example_code/s3/scenarios/batch/s3_batch.py new file mode 100644 index 00000000000..0bca765e1de --- /dev/null +++ b/python/example_code/s3/scenarios/batch/s3_batch.py @@ -0,0 +1,601 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +This module provides functionality for AWS S3 Batch Operations. +It includes classes for managing CloudFormation stacks and S3 batch scenarios. +""" + +import json +import time +import uuid + +import boto3 +from botocore.exceptions import ClientError, WaiterError + +class CloudFormationHelper: + """Helper class for managing CloudFormation stack operations.""" + # Change the value of 'region' to your preferred AWS Region. + def __init__(self, region_name='us-west-2'): + """ + Initialize CloudFormation helper. + + Args: + region_name (str): AWS region name + """ + # Create a CloudFormation client for the specified region + self.cfn_client = boto3.client('cloudformation', region_name=region_name) + + def deploy_cloudformation_stack(self, stack_name): + """ + Deploy a CloudFormation stack with S3 batch operation permissions. + + Args: + stack_name (str): Name of the CloudFormation stack + + Raises: + ClientError: If stack creation fails + """ + try: + # Define the CloudFormation template + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "S3BatchRole": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": + "batchoperations.s3.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + }, + "Policies": [ + { + "PolicyName": "S3BatchOperationsPolicy", + "PolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:PutObjectTagging", + "s3:GetObjectTagging" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject" + ], + "Resource": "*" + } + ] + } + } + ] + } + } + }, + "Outputs": { + "S3BatchRoleArn": { + "Description": "ARN of IAM Role for S3 Batch Operations", + "Value": {"Fn::GetAtt": ["S3BatchRole", "Arn"]} + } + } + } + + self.cfn_client.create_stack( + StackName=stack_name, + TemplateBody=json.dumps(template), + Capabilities=['CAPABILITY_IAM'] + ) + + print(f"Creating stack {stack_name}...") + self._wait_for_stack_completion(stack_name, 'CREATE') + print(f"Stack {stack_name} created successfully") + + except ClientError as e: + print(f"Error creating CloudFormation stack: {e}") + raise + + def get_stack_outputs(self, stack_name): + """ + Get CloudFormation stack outputs. 
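+        Reads the stack's Outputs via describe_stacks and returns them as a dict.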
+ + Args: + stack_name (str): Name of the CloudFormation stack + + Returns: + dict: Stack outputs + + Raises: + ClientError: If getting stack outputs fails + """ + try: + response = self.cfn_client.describe_stacks(StackName=stack_name) + outputs = {} + if 'Stacks' in response and response['Stacks']: + for output in response['Stacks'][0].get('Outputs', []): + outputs[output['OutputKey']] = output['OutputValue'] + return outputs + + except ClientError as e: + print(f"Error getting stack outputs: {e}") + raise + + def destroy_cloudformation_stack(self, stack_name): + """ + Delete a CloudFormation stack. + + Args: + stack_name (str): Name of the CloudFormation stack + + Raises: + ClientError: If stack deletion fails + """ + try: + self.cfn_client.delete_stack(StackName=stack_name) + print(f"Deleting stack {stack_name}...") + self._wait_for_stack_completion(stack_name, 'DELETE') + print(f"Stack {stack_name} deleted successfully") + + except ClientError as e: + print(f"Error deleting CloudFormation stack: {e}") + raise + + def _wait_for_stack_completion(self, stack_name, operation): + """ + Wait for CloudFormation stack operation to complete. + + Args: + stack_name (str): Name of the CloudFormation stack + operation (str): Stack operation (CREATE or DELETE) + + Raises: + WaiterError: If waiting for stack completion fails + """ + try: + waiter = self.cfn_client.get_waiter( + 'stack_create_complete' if operation == 'CREATE' + else 'stack_delete_complete' + ) + waiter.wait( + StackName=stack_name, + WaiterConfig={'Delay': 5, 'MaxAttempts': 60} + ) + except WaiterError as e: + print(f"Error waiting for stack {operation}: {e}") + raise + +class S3BatchScenario: + """Class for managing S3 Batch Operations scenarios.""" + + DASHES = "-" * 80 + STACK_NAME = "MyS3Stack" + + def __init__(self, region_name='us-west-2'): + """ + Initialize S3 Batch Operations scenario. + + Args: + region_name (str): AWS region name + """ + self.region_name = region_name + self.s3_client = boto3.client('s3', region_name=region_name) + self.s3control_client = boto3.client('s3control', region_name=region_name) + self.sts_client = boto3.client('sts', region_name=region_name) + + def get_account_id(self): + """ + Get AWS account ID. + + Returns: + str: AWS account ID + """ + return self.sts_client.get_caller_identity()["Account"] + + def create_bucket(self, bucket_name): + """ + Create an S3 bucket. + + Args: + bucket_name (str): Name of the bucket to create + + Raises: + ClientError: If bucket creation fails + """ + try: + if self.region_name != 'us-east-1': + self.s3_client.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={ + 'LocationConstraint': self.region_name + } + ) + else: + self.s3_client.create_bucket(Bucket=bucket_name) + print(f"Created bucket: {bucket_name}") + except ClientError as e: + print(f"Error creating bucket: {e}") + raise + + def upload_files_to_bucket(self, bucket_name, file_names): + """ + Upload files to S3 bucket including manifest file. 
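+        Each manifest row is a "bucket,key" pair, the CSV format S3 Batch Operations expects.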
+ + Args: + bucket_name (str): Target bucket name + file_names (list): List of file names to upload + + Returns: + str: ETag of the manifest file + + Raises: + ClientError: If file upload fails + """ + try: + for file_name in file_names: + if file_name != "job-manifest.csv": + content = f"Content for {file_name}" + self.s3_client.put_object( + Bucket=bucket_name, + Key=file_name, + Body=content.encode('utf-8') + ) + print(f"Uploaded {file_name} to {bucket_name}") + + manifest_content = "" + for file_name in file_names: + if file_name != "job-manifest.csv": + manifest_content += f"{bucket_name},{file_name}\n" + + manifest_response = self.s3_client.put_object( + Bucket=bucket_name, + Key="job-manifest.csv", + Body=manifest_content.encode('utf-8') + ) + print(f"Uploaded manifest file to {bucket_name}") + print(f"Manifest content:\n{manifest_content}") + return manifest_response['ETag'].strip('"') + + except ClientError as e: + print(f"Error uploading files: {e}") + raise + + def create_s3_batch_job(self, account_id, role_arn, manifest_location, + report_bucket_name): + """ + Create an S3 batch operation job. + + Args: + account_id (str): AWS account ID + role_arn (str): IAM role ARN for batch operations + manifest_location (str): Location of the manifest file + report_bucket_name (str): Bucket for job reports + + Returns: + str: Job ID + + Raises: + ClientError: If job creation fails + """ + try: + # Extract bucket name from manifest location + bucket_name = manifest_location.split(':::')[1].split('/')[0] + manifest_key = 'job-manifest.csv' + # Get the ETag of the manifest file for verification + manifest_obj = self.s3_client.head_object( + Bucket=bucket_name, + Key=manifest_key + ) + etag = manifest_obj['ETag'].strip('"') + # Create the batch job with specified parameters + response = self.s3control_client.create_job( + AccountId=account_id, + # Define the operation (in this case, adding tags to objects) + Operation={ + 'S3PutObjectTagging': { + 'TagSet': [ + { + 'Key': 'BatchTag', + 'Value': 'BatchValue' + }, + ] + } + }, + # Configure job completion report settings + Report={ + 'Bucket': report_bucket_name, + 'Format': 'Report_CSV_20180820', + 'Enabled': True, + 'Prefix': 'batch-op-reports', + 'ReportScope': 'AllTasks' + }, + Manifest={ + 'Spec': { + 'Format': 'S3BatchOperations_CSV_20180820', + 'Fields': ['Bucket', 'Key'] + }, + 'Location': { + 'ObjectArn': manifest_location, + 'ETag': etag + } + }, + Priority=10, + RoleArn=role_arn, + Description='Batch job for tagging objects', + ConfirmationRequired=True + ) + job_id = response['JobId'] + print(f"Created batch job with ID: {job_id}") + print("Job requires confirmation before starting...") + return job_id + except ClientError as e: + print(f"Error creating batch job: {e}") + if 'Message' in str(e): + print(f"Detailed error message: {e.response['Message']}") + raise + def check_job_failure_reasons(self, job_id, account_id): + """ + Check for any failure reasons of a batch job. 
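+        Uses describe_job and prints each entry in the job's FailureReasons list.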
+ + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + + Returns: + list: List of failure reasons + + Raises: + ClientError: If checking job failure reasons fails + """ + try: + response = self.s3control_client.describe_job( + AccountId=account_id, + JobId=job_id + ) + if 'FailureReasons' in response['Job']: + print("Job failure reasons:") + for reason in response['Job']['FailureReasons']: + print(f"- {reason}") + return response['Job'].get('FailureReasons', []) + except ClientError as e: + print(f"Error checking job failure reasons: {e}") + raise + + def wait_for_job_ready(self, job_id, account_id, desired_status='Ready'): + """ + Wait for a job to reach the desired status. + + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + desired_status (str): Target status to wait for + + Returns: + bool: True if desired status is reached, False otherwise + + Raises: + ClientError: If checking job status fails + """ + print(f"Waiting for job to become {desired_status}...") + max_attempts = 60 + attempt = 0 + while attempt < max_attempts: + try: + response = self.s3control_client.describe_job( + AccountId=account_id, + JobId=job_id + ) + current_status = response['Job']['Status'] + print(f"Current job status: {current_status}") + if current_status == desired_status: + return True + if current_status in ['Active', 'Failed', 'Cancelled', 'Complete']: + print(f"Job is in {current_status} state, cannot update priority") + if 'FailureReasons' in response['Job']: + print("Failure reasons:") + for reason in response['Job']['FailureReasons']: + print(f"- {reason}") + return False + + time.sleep(20) + attempt += 1 + except ClientError as e: + print(f"Error checking job status: {e}") + raise + print(f"Timeout waiting for job to become {desired_status}") + return False + + def update_job_priority(self, job_id, account_id): + """ + Update the priority of a batch job and start it. + + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + + Raises: + ClientError: If updating job priority fails + """ + try: + if self.wait_for_job_ready(job_id, account_id): + self.s3control_client.update_job_priority( + AccountId=account_id, + JobId=job_id, + Priority=20 + ) + print(f"Updated priority for job {job_id}") + self.s3control_client.update_job_status( + AccountId=account_id, + JobId=job_id, + RequestedJobStatus='Active' + ) + print("Job confirmed and started") + else: + print("Could not update job priority as job is not in Ready state") + except ClientError as e: + print(f"Error updating job priority: {e}") + raise + + def cleanup_resources(self, bucket_name, file_names): + """ + Clean up all resources created during the scenario. + + Args: + bucket_name (str): Name of the bucket to clean up + file_names (list): List of files to delete + + Raises: + ClientError: If cleanup fails + """ + try: + for file_name in file_names: + self.s3_client.delete_object(Bucket=bucket_name, Key=file_name) + print(f"Deleted {file_name}") + + response = self.s3_client.list_objects_v2( + Bucket=bucket_name, + Prefix='batch-op-reports/' + ) + if 'Contents' in response: + for obj in response['Contents']: + self.s3_client.delete_object( + Bucket=bucket_name, + Key=obj['Key'] + ) + print(f"Deleted {obj['Key']}") + + self.s3_client.delete_bucket(Bucket=bucket_name) + print(f"Deleted bucket {bucket_name}") + except ClientError as e: + print(f"Error in cleanup: {e}") + raise + + +def wait_for_input(): + """ + Wait for user input to continue. 
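+    Loops until the user enters 'c' (case-insensitive).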
+
+    Returns:
+        None
+    """
+    while True:
+        user_input = input("\nEnter 'c' to continue: ")
+        if user_input.lower() == 'c':
+            print("Continuing with the program...\n")
+            break
+        print("Invalid input. Please try again.")
+
+
+def setup_resources(scenario, bucket_name, file_names):
+    """
+    Set up initial resources for the scenario.
+
+    Args:
+        scenario: S3BatchScenario instance
+        bucket_name (str): Name of the bucket to create
+        file_names (list): List of files to upload
+
+    Returns:
+        tuple: Manifest location and report bucket ARN
+    """
+    print("\nSetting up required resources...")
+    scenario.create_bucket(bucket_name)
+    report_bucket_arn = f"arn:aws:s3:::{bucket_name}"
+    manifest_location = f"arn:aws:s3:::{bucket_name}/job-manifest.csv"
+    scenario.upload_files_to_bucket(bucket_name, file_names)
+    return manifest_location, report_bucket_arn
+
+
+def main():
+    """Main function to run the S3 Batch Operations scenario."""
+    region_name = 'us-west-2'
+    scenario = S3BatchScenario(region_name)
+    cfn_helper = CloudFormationHelper(region_name)
+    account_id = scenario.get_account_id()
+    # Generate a unique bucket name using UUID
+    bucket_name = f"demo-s3-batch-{str(uuid.uuid4())}"
+    # Define test files to be created and processed
+    file_names = [
+        "job-manifest.csv",
+        "object-key-1.txt",
+        "object-key-2.txt",
+        "object-key-3.txt",
+        "object-key-4.txt"
+    ]
+
+    print(scenario.DASHES)
+    print("Welcome to the Amazon S3 Batch basics scenario.")
+    print("""
+    S3 Batch Operations enables efficient and cost-effective processing of large-scale
+    data stored in Amazon S3. It automatically scales resources to handle varying workloads
+    without the need for manual intervention.
+
+    This Python program walks you through Amazon S3 Batch Operations.
+    """)
+
+    try:
+        # Deploy CloudFormation stack for IAM roles
+        print("Deploying CloudFormation stack...")
+        cfn_helper.deploy_cloudformation_stack(scenario.STACK_NAME)
+        # Get the created IAM role ARN from stack outputs
+        stack_outputs = cfn_helper.get_stack_outputs(scenario.STACK_NAME)
+        iam_role_arn = stack_outputs.get('S3BatchRoleArn')
+        # Set up S3 bucket and upload test files
+        manifest_location, report_bucket_arn = setup_resources(
+            scenario, bucket_name, file_names
+        )
+
+        wait_for_input()
+
+        print("\n1. Creating S3 Batch Job...")
+        job_id = scenario.create_s3_batch_job(
+            account_id,
+            iam_role_arn,
+            manifest_location,
+            report_bucket_arn
+        )
+
+        time.sleep(5)
+        failure_reasons = scenario.check_job_failure_reasons(job_id, account_id)
+        if failure_reasons:
+            print("\nJob failed. Please fix the issues and try again.")
+            if input(
+                "Do you want to proceed with the rest of the operations? (y/n): "
+            ).lower() != 'y':
+                raise ValueError("Job failed, stopping execution")
+
+        wait_for_input()
+        print("\n2. Updating job priority...")
+        scenario.update_job_priority(job_id, account_id)
+
+        print("\nCleanup")
+        if input(
+            "Do you want to delete the AWS resources used in this scenario? (y/n): "
+        ).lower() == 'y':
+            scenario.cleanup_resources(bucket_name, file_names)
+            cfn_helper.destroy_cloudformation_stack(scenario.STACK_NAME)
+
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        raise
+
+    print("\nThe Amazon S3 Batch scenario has successfully completed.")
+    print(scenario.DASHES)
+
+
+if __name__ == "__main__":
+    main()

From 566d676fa07170d036c35eb1d4ca2b922a98b05d Mon Sep 17 00:00:00 2001
From: John Lwin
Date: Wed, 16 Jul 2025 01:55:22 -0700
Subject: [PATCH 02/16] chore(README): Add README for S3 Batch Python

---
 .../example_code/s3/scenarios/batch/README.md | 56 +++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100644 python/example_code/s3/scenarios/batch/README.md

diff --git a/python/example_code/s3/scenarios/batch/README.md b/python/example_code/s3/scenarios/batch/README.md
new file mode 100644
index 00000000000..9500bcf0049
--- /dev/null
+++ b/python/example_code/s3/scenarios/batch/README.md
@@ -0,0 +1,56 @@
+# Amazon S3 Batch Operations for the SDK for Python (boto3)
+
+## Overview
+
+This example demonstrates how to use the AWS SDK for Python (boto3) to work with the Amazon Simple Storage Service (Amazon S3) Batch Operations scenario. The scenario covers operations such as creating a manifest of objects, creating and monitoring a batch job, updating the job's priority, and cleaning up the resources it used.
+
+Here are the top six steps this scenario covers.
+
+1. **Creates an IAM role**: Deploys a CloudFormation stack with an IAM role that S3 Batch Operations can assume.
+
+2. **Sets up an S3 bucket**: Creates a bucket and uploads the sample objects to process.
+
+3. **Uploads a manifest**: Writes a CSV manifest that lists each object the job should act on.
+
+4. **Creates a batch job**: Creates an S3 Batch Operations job that tags every object in the manifest.
+
+5. **Checks the status of the job**: Describes the job and reports any failure reasons.
+
+6. **Updates the job priority**: Raises the job's priority and starts the job.
+
+## ⚠ Important
+
+- Running this code might result in charges to your AWS account. For more details, see [AWS Pricing](https://aws.amazon.com/pricing/) and [Free Tier](https://aws.amazon.com/free/).
+- Running the tests might result in charges to your AWS account.
+- We recommend that you grant your code least privilege. At most, grant only the minimum permissions required to perform the task. For more information, see [Grant least privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege).
+- This code is not tested in every AWS Region. For more information, see [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services).
+
+## Code examples
+
+### Prerequisites
+
+To run these examples, you need:
+
+- Python 3.x installed.
+- Run `pip install -r requirements.txt`.
+- AWS credentials configured. For more information, see [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html).
+
+#### Running the workflow
+
+To run this workflow, make sure your AWS credentials are configured, then run:
+
+```bash
+python s3_batch.py
+```
+
+## Additional resources
+
+- [Amazon S3 Batch Operations User Guide](https://docs.aws.amazon.com/AmazonS3/latest/userguide/batch-ops.html)
+- [Amazon S3 API Reference](https://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html)
+- [boto3 Amazon S3 reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html)
+
+---
+
+© Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ +SPDX-License-Identifier: Apache-2.0 From 5923e6321d8202ed76e67860d6ffa2fb13167575 Mon Sep 17 00:00:00 2001 From: John Lwin Date: Wed, 16 Jul 2025 02:14:27 -0700 Subject: [PATCH 03/16] test: Add unit tests for S3 Batch Scenario Python --- .../batch/test/test_requirements.txt | 4 + .../s3/scenarios/batch/test/test_s3_batch.py | 291 ++++++++++++++++++ 2 files changed, 295 insertions(+) create mode 100644 python/example_code/s3/scenarios/batch/test/test_requirements.txt create mode 100644 python/example_code/s3/scenarios/batch/test/test_s3_batch.py diff --git a/python/example_code/s3/scenarios/batch/test/test_requirements.txt b/python/example_code/s3/scenarios/batch/test/test_requirements.txt new file mode 100644 index 00000000000..5df1a8f4ba2 --- /dev/null +++ b/python/example_code/s3/scenarios/batch/test/test_requirements.txt @@ -0,0 +1,4 @@ +pytest>=7.0.0 +pytest-mock>=3.10.0 +boto3>=1.26.0 +botocore>=1.29.0 \ No newline at end of file diff --git a/python/example_code/s3/scenarios/batch/test/test_s3_batch.py b/python/example_code/s3/scenarios/batch/test/test_s3_batch.py new file mode 100644 index 00000000000..df0fa21f34a --- /dev/null +++ b/python/example_code/s3/scenarios/batch/test/test_s3_batch.py @@ -0,0 +1,291 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +import pytest +from unittest.mock import Mock, patch, MagicMock +from botocore.exceptions import ClientError, WaiterError +import json + +from s3_batch import CloudFormationHelper, S3BatchScenario, setup_resources + + +class TestCloudFormationHelper: + """Test cases for CloudFormationHelper class.""" + + @pytest.fixture + def cfn_helper(self): + """Create CloudFormationHelper instance for testing.""" + return CloudFormationHelper('us-west-2') + + @patch('boto3.client') + def test_init(self, mock_boto3_client): + """Test CloudFormationHelper initialization.""" + helper = CloudFormationHelper('us-east-1') + mock_boto3_client.assert_called_with('cloudformation', region_name='us-east-1') + + @patch('boto3.client') + def test_deploy_cloudformation_stack_success(self, mock_boto3_client, cfn_helper): + """Test successful CloudFormation stack deployment.""" + mock_client = Mock() + mock_boto3_client.return_value = mock_client + cfn_helper.cfn_client = mock_client + + with patch.object(cfn_helper, '_wait_for_stack_completion'): + cfn_helper.deploy_cloudformation_stack('test-stack') + + mock_client.create_stack.assert_called_once() + call_args = mock_client.create_stack.call_args + assert call_args[1]['StackName'] == 'test-stack' + assert 'CAPABILITY_IAM' in call_args[1]['Capabilities'] + + @patch('boto3.client') + def test_deploy_cloudformation_stack_failure(self, mock_boto3_client, cfn_helper): + """Test CloudFormation stack deployment failure.""" + mock_client = Mock() + mock_client.create_stack.side_effect = ClientError( + {'Error': {'Code': 'ValidationError', 'Message': 'Invalid template'}}, + 'CreateStack' + ) + mock_boto3_client.return_value = mock_client + cfn_helper.cfn_client = mock_client + + with pytest.raises(ClientError): + cfn_helper.deploy_cloudformation_stack('test-stack') + + @patch('boto3.client') + def test_get_stack_outputs_success(self, mock_boto3_client, cfn_helper): + """Test successful retrieval of stack outputs.""" + mock_client = Mock() + mock_client.describe_stacks.return_value = { + 'Stacks': [{ + 'Outputs': [ + {'OutputKey': 'S3BatchRoleArn', 'OutputValue': 'arn:aws:iam::123456789012:role/test-role'} + ] + }] + } + 
mock_boto3_client.return_value = mock_client + cfn_helper.cfn_client = mock_client + + outputs = cfn_helper.get_stack_outputs('test-stack') + assert outputs['S3BatchRoleArn'] == 'arn:aws:iam::123456789012:role/test-role' + + @patch('boto3.client') + def test_destroy_cloudformation_stack_success(self, mock_boto3_client, cfn_helper): + """Test successful CloudFormation stack deletion.""" + mock_client = Mock() + mock_boto3_client.return_value = mock_client + cfn_helper.cfn_client = mock_client + + with patch.object(cfn_helper, '_wait_for_stack_completion'): + cfn_helper.destroy_cloudformation_stack('test-stack') + + mock_client.delete_stack.assert_called_once_with(StackName='test-stack') + + +class TestS3BatchScenario: + """Test cases for S3BatchScenario class.""" + + @pytest.fixture + def s3_scenario(self): + """Create S3BatchScenario instance for testing.""" + return S3BatchScenario('us-west-2') + + @patch('boto3.client') + def test_init(self, mock_boto3_client): + """Test S3BatchScenario initialization.""" + scenario = S3BatchScenario('us-east-1') + assert mock_boto3_client.call_count == 3 + assert scenario.region_name == 'us-east-1' + + @patch('boto3.client') + def test_get_account_id(self, mock_boto3_client, s3_scenario): + """Test getting AWS account ID.""" + mock_sts_client = Mock() + mock_sts_client.get_caller_identity.return_value = {'Account': '123456789012'} + s3_scenario.sts_client = mock_sts_client + + account_id = s3_scenario.get_account_id() + assert account_id == '123456789012' + + @patch('boto3.client') + def test_create_bucket_us_west_2(self, mock_boto3_client, s3_scenario): + """Test bucket creation in us-west-2.""" + mock_s3_client = Mock() + s3_scenario.s3_client = mock_s3_client + + s3_scenario.create_bucket('test-bucket') + + mock_s3_client.create_bucket.assert_called_once_with( + Bucket='test-bucket', + CreateBucketConfiguration={'LocationConstraint': 'us-west-2'} + ) + + @patch('boto3.client') + def test_create_bucket_us_east_1(self, mock_boto3_client): + """Test bucket creation in us-east-1.""" + scenario = S3BatchScenario('us-east-1') + mock_s3_client = Mock() + scenario.s3_client = mock_s3_client + + scenario.create_bucket('test-bucket') + + mock_s3_client.create_bucket.assert_called_once_with(Bucket='test-bucket') + + @patch('boto3.client') + def test_upload_files_to_bucket(self, mock_boto3_client, s3_scenario): + """Test uploading files to S3 bucket.""" + mock_s3_client = Mock() + mock_s3_client.put_object.return_value = {'ETag': '"test-etag"'} + s3_scenario.s3_client = mock_s3_client + + file_names = ['job-manifest.csv', 'test-file.txt'] + etag = s3_scenario.upload_files_to_bucket('test-bucket', file_names) + + assert etag == 'test-etag' + assert mock_s3_client.put_object.call_count == 2 + + @patch('boto3.client') + def test_create_s3_batch_job_success(self, mock_boto3_client, s3_scenario): + """Test successful S3 batch job creation.""" + mock_s3_client = Mock() + mock_s3_client.head_object.return_value = {'ETag': '"test-etag"'} + mock_s3control_client = Mock() + mock_s3control_client.create_job.return_value = {'JobId': 'test-job-id'} + + s3_scenario.s3_client = mock_s3_client + s3_scenario.s3control_client = mock_s3control_client + + job_id = s3_scenario.create_s3_batch_job( + '123456789012', + 'arn:aws:iam::123456789012:role/test-role', + 'arn:aws:s3:::test-bucket/job-manifest.csv', + 'arn:aws:s3:::test-bucket' + ) + + assert job_id == 'test-job-id' + mock_s3control_client.create_job.assert_called_once() + + @patch('boto3.client') + def 
test_check_job_failure_reasons(self, mock_boto3_client, s3_scenario): + """Test checking job failure reasons.""" + mock_s3control_client = Mock() + mock_s3control_client.describe_job.return_value = { + 'Job': { + 'FailureReasons': ['Reason 1', 'Reason 2'] + } + } + s3_scenario.s3control_client = mock_s3control_client + + reasons = s3_scenario.check_job_failure_reasons('test-job-id', '123456789012') + + assert reasons == ['Reason 1', 'Reason 2'] + + @patch('boto3.client') + @patch('time.sleep') + def test_wait_for_job_ready_success(self, mock_sleep, mock_boto3_client, s3_scenario): + """Test waiting for job to become ready.""" + mock_s3control_client = Mock() + mock_s3control_client.describe_job.return_value = { + 'Job': {'Status': 'Ready'} + } + s3_scenario.s3control_client = mock_s3control_client + + result = s3_scenario.wait_for_job_ready('test-job-id', '123456789012') + + assert result is True + + @patch('boto3.client') + def test_update_job_priority_success(self, mock_boto3_client, s3_scenario): + """Test successful job priority update.""" + mock_s3control_client = Mock() + s3_scenario.s3control_client = mock_s3control_client + + with patch.object(s3_scenario, 'wait_for_job_ready', return_value=True): + s3_scenario.update_job_priority('test-job-id', '123456789012') + + mock_s3control_client.update_job_priority.assert_called_once() + mock_s3control_client.update_job_status.assert_called_once() + + @patch('boto3.client') + def test_cleanup_resources(self, mock_boto3_client, s3_scenario): + """Test resource cleanup.""" + mock_s3_client = Mock() + mock_s3_client.list_objects_v2.return_value = { + 'Contents': [{'Key': 'batch-op-reports/report1.csv'}] + } + s3_scenario.s3_client = mock_s3_client + + file_names = ['test-file.txt'] + s3_scenario.cleanup_resources('test-bucket', file_names) + + assert mock_s3_client.delete_object.call_count == 2 # file + report + mock_s3_client.delete_bucket.assert_called_once_with(Bucket='test-bucket') + + +class TestUtilityFunctions: + """Test cases for utility functions.""" + + @patch('s3_batch.input', return_value='c') + def test_wait_for_input_valid(self, mock_input): + """Test wait_for_input with valid input.""" + from s3_batch import wait_for_input + wait_for_input() # Should not raise exception + + @patch('s3_batch.input', side_effect=['invalid', 'c']) + def test_wait_for_input_invalid_then_valid(self, mock_input): + """Test wait_for_input with invalid then valid input.""" + from s3_batch import wait_for_input + wait_for_input() # Should not raise exception + + def test_setup_resources(self): + """Test setup_resources function.""" + mock_scenario = Mock() + + manifest_location, report_bucket_arn = setup_resources( + mock_scenario, 'test-bucket', ['file1.txt', 'file2.txt'] + ) + + assert manifest_location == 'arn:aws:s3:::test-bucket/job-manifest.csv' + assert report_bucket_arn == 'arn:aws:s3:::test-bucket' + mock_scenario.create_bucket.assert_called_once_with('test-bucket') + mock_scenario.upload_files_to_bucket.assert_called_once() + + +class TestErrorHandling: + """Test cases for error handling scenarios.""" + + @pytest.fixture + def s3_scenario(self): + """Create S3BatchScenario instance for testing.""" + return S3BatchScenario('us-west-2') + + @patch('boto3.client') + def test_create_bucket_client_error(self, mock_boto3_client, s3_scenario): + """Test bucket creation with ClientError.""" + mock_s3_client = Mock() + mock_s3_client.create_bucket.side_effect = ClientError( + {'Error': {'Code': 'BucketAlreadyExists', 'Message': 'Bucket exists'}}, + 
'CreateBucket' + ) + s3_scenario.s3_client = mock_s3_client + + with pytest.raises(ClientError): + s3_scenario.create_bucket('test-bucket') + + @patch('boto3.client') + def test_create_s3_batch_job_client_error(self, mock_boto3_client, s3_scenario): + """Test S3 batch job creation with ClientError.""" + mock_s3_client = Mock() + mock_s3_client.head_object.side_effect = ClientError( + {'Error': {'Code': 'NoSuchKey', 'Message': 'Key not found'}}, + 'HeadObject' + ) + s3_scenario.s3_client = mock_s3_client + + with pytest.raises(ClientError): + s3_scenario.create_s3_batch_job( + '123456789012', + 'arn:aws:iam::123456789012:role/test-role', + 'arn:aws:s3:::test-bucket/job-manifest.csv', + 'arn:aws:s3:::test-bucket' + ) \ No newline at end of file From 304372fb6e373f25c278e29a9d41cf0e762f6559 Mon Sep 17 00:00:00 2001 From: John Lwin Date: Mon, 21 Jul 2025 17:59:22 -0700 Subject: [PATCH 04/16] Fix job activation and update tests --- .../s3/scenarios/batch/s3_batch.py | 75 ++++++++++++----- .../s3/scenarios/batch/test/test_s3_batch.py | 81 ++++++++++++++++--- 2 files changed, 125 insertions(+), 31 deletions(-) diff --git a/python/example_code/s3/scenarios/batch/s3_batch.py b/python/example_code/s3/scenarios/batch/s3_batch.py index 0bca765e1de..bb5ed67ebeb 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch.py +++ b/python/example_code/s3/scenarios/batch/s3_batch.py @@ -1,6 +1,7 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 +# snippet-start:[python.example_code.s3.S3Batch.scenario] """ This module provides functionality for AWS S3 Batch Operations. It includes classes for managing CloudFormation stacks and S3 batch scenarios. @@ -57,6 +58,9 @@ def deploy_cloudformation_stack(self, stack_name): } ] }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/AmazonS3FullAccess" + ], "Policies": [ { "PolicyName": "S3BatchOperationsPolicy", @@ -66,18 +70,8 @@ def deploy_cloudformation_stack(self, stack_name): { "Effect": "Allow", "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:PutObjectTagging", - "s3:GetObjectTagging" - ], - "Resource": "*" - }, - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:PutObject" + "s3:*", + "s3-object-lambda:*" ], "Resource": "*" } @@ -336,11 +330,12 @@ def create_s3_batch_job(self, account_id, role_arn, manifest_location, Priority=10, RoleArn=role_arn, Description='Batch job for tagging objects', - ConfirmationRequired=True + # Set to False to avoid confirmation requirement + ConfirmationRequired=False ) job_id = response['JobId'] print(f"Created batch job with ID: {job_id}") - print("Job requires confirmation before starting...") + print("Job created and should start automatically") return job_id except ClientError as e: print(f"Error creating batch job: {e}") @@ -403,6 +398,11 @@ def wait_for_job_ready(self, job_id, account_id, desired_status='Ready'): print(f"Current job status: {current_status}") if current_status == desired_status: return True + # For jobs with ConfirmationRequired=True, they start in Suspended state + # and need to be activated + if current_status == 'Suspended': + print("Job is in Suspended state, can proceed with activation") + return True if current_status in ['Active', 'Failed', 'Cancelled', 'Complete']: print(f"Job is in {current_status} state, cannot update priority") if 'FailureReasons' in response['Job']: @@ -431,21 +431,40 @@ def update_job_priority(self, job_id, account_id): ClientError: If updating job priority fails """ try: - if 
self.wait_for_job_ready(job_id, account_id): + # Check current job status + response = self.s3control_client.describe_job( + AccountId=account_id, + JobId=job_id + ) + current_status = response['Job']['Status'] + print(f"Current job status before update: {current_status}") + print(f"Full job details: {response['Job']}") + + # First try to update the job priority + try: self.s3control_client.update_job_priority( AccountId=account_id, JobId=job_id, Priority=20 ) - print(f"Updated priority for job {job_id}") + print(f"Successfully updated priority for job {job_id}") + except ClientError as e: + print(f"Warning: Could not update job priority: {e}") + # Continue anyway to try activating the job + + # Then try to activate the job + try: self.s3control_client.update_job_status( AccountId=account_id, JobId=job_id, RequestedJobStatus='Active' ) - print("Job confirmed and started") - else: - print("Could not update job priority as job is not in Ready state") + print(f"Successfully activated job {job_id}") + except ClientError as e: + print(f"Error activating job: {e}") + if 'Message' in str(e): + print(f"Detailed error message: {e.response.get('Message', '')}") + raise except ClientError as e: print(f"Error updating job priority: {e}") raise @@ -579,8 +598,21 @@ def main(): raise ValueError("Job failed, stopping execution") wait_for_input() - print("\n2. Updating job priority...") - scenario.update_job_priority(job_id, account_id) + print("\n2. Checking job status...") + # Get current job status instead of trying to update priority + response = scenario.s3control_client.describe_job( + AccountId=account_id, + JobId=job_id + ) + current_status = response['Job']['Status'] + print(f"Current job status: {current_status}") + + # Only try to update priority if job is not already active + if current_status not in ['Active', 'Complete']: + print("\nUpdating job priority...") + scenario.update_job_priority(job_id, account_id) + else: + print("Job is already active or complete, no need to update priority.") print("\nCleanup") if input( @@ -599,3 +631,4 @@ def main(): if __name__ == "__main__": main() +# snippet-end:[python.example_code.s3.S3Batch.scenario] diff --git a/python/example_code/s3/scenarios/batch/test/test_s3_batch.py b/python/example_code/s3/scenarios/batch/test/test_s3_batch.py index df0fa21f34a..78d915865db 100644 --- a/python/example_code/s3/scenarios/batch/test/test_s3_batch.py +++ b/python/example_code/s3/scenarios/batch/test/test_s3_batch.py @@ -1,10 +1,12 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 -import pytest -from unittest.mock import Mock, patch, MagicMock -from botocore.exceptions import ClientError, WaiterError +"""Unit tests for S3 batch operations module.""" + import json +import pytest +from unittest.mock import Mock, patch +from botocore.exceptions import ClientError from s3_batch import CloudFormationHelper, S3BatchScenario, setup_resources @@ -20,7 +22,7 @@ def cfn_helper(self): @patch('boto3.client') def test_init(self, mock_boto3_client): """Test CloudFormationHelper initialization.""" - helper = CloudFormationHelper('us-east-1') + CloudFormationHelper('us-east-1') mock_boto3_client.assert_called_with('cloudformation', region_name='us-east-1') @patch('boto3.client') @@ -29,14 +31,17 @@ def test_deploy_cloudformation_stack_success(self, mock_boto3_client, cfn_helper mock_client = Mock() mock_boto3_client.return_value = mock_client cfn_helper.cfn_client = mock_client - with patch.object(cfn_helper, '_wait_for_stack_completion'): cfn_helper.deploy_cloudformation_stack('test-stack') - mock_client.create_stack.assert_called_once() call_args = mock_client.create_stack.call_args assert call_args[1]['StackName'] == 'test-stack' assert 'CAPABILITY_IAM' in call_args[1]['Capabilities'] + + # Verify the template includes AmazonS3FullAccess policy + template_body = json.loads(call_args[1]['TemplateBody']) + assert 'ManagedPolicyArns' in template_body['Resources']['S3BatchRole']['Properties'] + assert 'arn:aws:iam::aws:policy/AmazonS3FullAccess' in template_body['Resources']['S3BatchRole']['Properties']['ManagedPolicyArns'] @patch('boto3.client') def test_deploy_cloudformation_stack_failure(self, mock_boto3_client, cfn_helper): @@ -48,7 +53,6 @@ def test_deploy_cloudformation_stack_failure(self, mock_boto3_client, cfn_helper ) mock_boto3_client.return_value = mock_client cfn_helper.cfn_client = mock_client - with pytest.raises(ClientError): cfn_helper.deploy_cloudformation_stack('test-stack') @@ -61,7 +65,7 @@ def test_get_stack_outputs_success(self, mock_boto3_client, cfn_helper): 'Outputs': [ {'OutputKey': 'S3BatchRoleArn', 'OutputValue': 'arn:aws:iam::123456789012:role/test-role'} ] - }] + }] } mock_boto3_client.return_value = mock_client cfn_helper.cfn_client = mock_client @@ -82,6 +86,7 @@ def test_destroy_cloudformation_stack_success(self, mock_boto3_client, cfn_helpe mock_client.delete_stack.assert_called_once_with(StackName='test-stack') + class TestS3BatchScenario: """Test cases for S3BatchScenario class.""" @@ -164,6 +169,10 @@ def test_create_s3_batch_job_success(self, mock_boto3_client, s3_scenario): assert job_id == 'test-job-id' mock_s3control_client.create_job.assert_called_once() + + # Verify ConfirmationRequired is set to False + call_args = mock_s3control_client.create_job.call_args + assert call_args[1]['ConfirmationRequired'] is False @patch('boto3.client') def test_check_job_failure_reasons(self, mock_boto3_client, s3_scenario): @@ -193,18 +202,68 @@ def test_wait_for_job_ready_success(self, mock_sleep, mock_boto3_client, s3_scen result = s3_scenario.wait_for_job_ready('test-job-id', '123456789012') assert result is True + + @patch('boto3.client') + @patch('time.sleep') + def test_wait_for_job_ready_suspended(self, mock_sleep, mock_boto3_client, s3_scenario): + """Test waiting for job with Suspended status.""" + mock_s3control_client = Mock() + mock_s3control_client.describe_job.return_value = { + 'Job': {'Status': 'Suspended'} + } + s3_scenario.s3control_client = mock_s3control_client + + result = 
s3_scenario.wait_for_job_ready('test-job-id', '123456789012') + + assert result is True @patch('boto3.client') def test_update_job_priority_success(self, mock_boto3_client, s3_scenario): """Test successful job priority update.""" mock_s3control_client = Mock() + mock_s3control_client.describe_job.return_value = { + 'Job': {'Status': 'Suspended'} + } s3_scenario.s3control_client = mock_s3control_client - with patch.object(s3_scenario, 'wait_for_job_ready', return_value=True): - s3_scenario.update_job_priority('test-job-id', '123456789012') + s3_scenario.update_job_priority('test-job-id', '123456789012') mock_s3control_client.update_job_priority.assert_called_once() mock_s3control_client.update_job_status.assert_called_once() + + @patch('boto3.client') + def test_update_job_priority_with_ready_status(self, mock_boto3_client, s3_scenario): + """Test job priority update with Ready status.""" + mock_s3control_client = Mock() + mock_s3control_client.describe_job.return_value = { + 'Job': {'Status': 'Ready'} + } + s3_scenario.s3control_client = mock_s3control_client + + s3_scenario.update_job_priority('test-job-id', '123456789012') + + mock_s3control_client.update_job_priority.assert_called_once() + mock_s3control_client.update_job_status.assert_called_once() + + @patch('boto3.client') + def test_update_job_priority_error_handling(self, mock_boto3_client, s3_scenario): + """Test error handling in job priority update.""" + mock_s3control_client = Mock() + mock_s3control_client.describe_job.return_value = { + 'Job': {'Status': 'Suspended'} + } + mock_s3control_client.update_job_priority.side_effect = ClientError( + {'Error': {'Code': 'InvalidRequest', 'Message': 'Cannot update priority'}}, + 'UpdateJobPriority' + ) + mock_s3control_client.update_job_status = Mock() + s3_scenario.s3control_client = mock_s3control_client + + # Should not raise exception due to error handling + s3_scenario.update_job_priority('test-job-id', '123456789012') + + # Should still try to activate the job even if priority update fails + mock_s3control_client.update_job_status.assert_called_once() @patch('boto3.client') def test_cleanup_resources(self, mock_boto3_client, s3_scenario): @@ -228,12 +287,14 @@ class TestUtilityFunctions: @patch('s3_batch.input', return_value='c') def test_wait_for_input_valid(self, mock_input): """Test wait_for_input with valid input.""" + # pylint: disable=import-outside-toplevel from s3_batch import wait_for_input wait_for_input() # Should not raise exception @patch('s3_batch.input', side_effect=['invalid', 'c']) def test_wait_for_input_invalid_then_valid(self, mock_input): """Test wait_for_input with invalid then valid input.""" + # pylint: disable=import-outside-toplevel from s3_batch import wait_for_input wait_for_input() # Should not raise exception From 9c1f6fb051210ca83a424affd072bbe5cb0a3ed4 Mon Sep 17 00:00:00 2001 From: John Lwin Date: Tue, 22 Jul 2025 17:56:46 -0700 Subject: [PATCH 05/16] chore(metadata): Add new metadata to s3-control-metadata --- .doc_gen/metadata/s3-control_metadata.yaml | 41 ++++++++++++++----- .../s3/scenarios/batch/s3_batch.py | 4 +- 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/.doc_gen/metadata/s3-control_metadata.yaml b/.doc_gen/metadata/s3-control_metadata.yaml index 5b34a0a4eac..b302756a413 100644 --- a/.doc_gen/metadata/s3-control_metadata.yaml +++ b/.doc_gen/metadata/s3-control_metadata.yaml @@ -29,15 +29,15 @@ s3-control_CreateJob: - s3control.java2.create_job.async.main - description: Create a compliance retention job. 
snippet_tags: - - s3control.java2.create_job.compliance.main + - s3control.java2.create_job.compliance.main - description: Create a legal hold off job. snippet_tags: - - s3control.java2.create_job.compliance.main + - s3control.java2.create_job.compliance.main - description: Create a new governance retention job. snippet_tags: - - s3.java2.create_governance_retemtion.main + - s3.java2.create_governance_retemtion.main services: - s3-control: {CreateJob} + s3-control: {CreateJob} s3-control_PutJobTagging: languages: Java: @@ -50,7 +50,7 @@ s3-control_PutJobTagging: snippet_tags: - s3control.java2.job.put.tags.main services: - s3-control: {PutJobTagging} + s3-control: {PutJobTagging} s3-control_DescribeJob: languages: Java: @@ -63,7 +63,7 @@ s3-control_DescribeJob: snippet_tags: - s3control.java2.describe_job.main services: - s3-control: {DescribeJob} + s3-control: {DescribeJob} s3-control_DeleteJobTagging: languages: Java: @@ -76,7 +76,7 @@ s3-control_DeleteJobTagging: snippet_tags: - s3control.java2.del_job_tagging.main services: - s3-control: {DeleteJobTagging} + s3-control: {DeleteJobTagging} s3-control_GetJobTagging: languages: Java: @@ -89,7 +89,7 @@ s3-control_GetJobTagging: snippet_tags: - s3control.java2.get_job_tagging.main services: - s3-control: {GetJobTagging} + s3-control: {GetJobTagging} s3-control_UpdateJobStatus: languages: Java: @@ -102,7 +102,7 @@ s3-control_UpdateJobStatus: snippet_tags: - s3control.java2.cancel_job.main services: - s3-control: {UpdateJobStatus} + s3-control: {UpdateJobStatus} s3-control_UpdateJobPriority: languages: Java: @@ -115,7 +115,7 @@ s3-control_UpdateJobPriority: snippet_tags: - s3control.java2.update_job.main services: - s3-control: {UpdateJobPriority} + s3-control: {UpdateJobPriority} s3-control_Basics: synopsis: learn core operations for &S3Control;. category: Basics @@ -132,5 +132,24 @@ s3-control_Basics: - description: An action class that wraps operations. snippet_tags: - s3control.java2.job.actions.main + Python: + versions: + - sdk_version: 3 + github: python/example_code/s3/scenarios/batch + sdkguide: + excerpts: + - description: Learn S3 Batch Basics Scenario. + snippet_tags: + - python.example_code.s3control.Batch.scenario services: - s3-control: {CreateJob, DeleteJobTagging, DescribeJob, GetJobTagging, ListJobs, PutJobTagging, UpdateJobPriority, UpdateJobStatus} + s3-control: + { + CreateJob, + DeleteJobTagging, + DescribeJob, + GetJobTagging, + ListJobs, + PutJobTagging, + UpdateJobPriority, + UpdateJobStatus, + } diff --git a/python/example_code/s3/scenarios/batch/s3_batch.py b/python/example_code/s3/scenarios/batch/s3_batch.py index bb5ed67ebeb..57a25ec2906 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch.py +++ b/python/example_code/s3/scenarios/batch/s3_batch.py @@ -1,7 +1,7 @@ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 -# snippet-start:[python.example_code.s3.S3Batch.scenario] +# snippet-start:[python.example_code.s3control.Batch.scenario] """ This module provides functionality for AWS S3 Batch Operations. It includes classes for managing CloudFormation stacks and S3 batch scenarios. 
@@ -631,4 +631,4 @@ def main(): if __name__ == "__main__": main() -# snippet-end:[python.example_code.s3.S3Batch.scenario] +# snippet-end:[python.example_code.s3control.Batch.scenario] From f95d9aa593ddf8960f01aaf3e749c1170ed52937 Mon Sep 17 00:00:00 2001 From: John Lwin Date: Mon, 4 Aug 2025 16:13:36 -0700 Subject: [PATCH 06/16] Add job status, priority updates, tagging --- .../s3/scenarios/batch/s3_batch.py | 248 ++++++++++++++---- 1 file changed, 203 insertions(+), 45 deletions(-) diff --git a/python/example_code/s3/scenarios/batch/s3_batch.py b/python/example_code/s3/scenarios/batch/s3_batch.py index 57a25ec2906..9acd3c53bb6 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch.py +++ b/python/example_code/s3/scenarios/batch/s3_batch.py @@ -330,12 +330,11 @@ def create_s3_batch_job(self, account_id, role_arn, manifest_location, Priority=10, RoleArn=role_arn, Description='Batch job for tagging objects', - # Set to False to avoid confirmation requirement - ConfirmationRequired=False + # Set to True so job starts in Suspended state for demonstration + ConfirmationRequired=True ) job_id = response['JobId'] - print(f"Created batch job with ID: {job_id}") - print("Job created and should start automatically") + print(f"The Job id is {job_id}") return job_id except ClientError as e: print(f"Error creating batch job: {e}") @@ -362,7 +361,6 @@ def check_job_failure_reasons(self, job_id, account_id): JobId=job_id ) if 'FailureReasons' in response['Job']: - print("Job failure reasons:") for reason in response['Job']['FailureReasons']: print(f"- {reason}") return response['Job'].get('FailureReasons', []) @@ -426,9 +424,6 @@ def update_job_priority(self, job_id, account_id): Args: job_id (str): ID of the batch job account_id (str): AWS account ID - - Raises: - ClientError: If updating job priority fails """ try: # Check current job status @@ -437,36 +432,174 @@ def update_job_priority(self, job_id, account_id): JobId=job_id ) current_status = response['Job']['Status'] - print(f"Current job status before update: {current_status}") - print(f"Full job details: {response['Job']}") + print(f"Current job status: {current_status}") - # First try to update the job priority - try: + # Only update priority if job is in a state that allows it + if current_status in ['Ready', 'Suspended']: self.s3control_client.update_job_priority( AccountId=account_id, JobId=job_id, - Priority=20 + Priority=60 ) - print(f"Successfully updated priority for job {job_id}") - except ClientError as e: - print(f"Warning: Could not update job priority: {e}") - # Continue anyway to try activating the job - - # Then try to activate the job - try: - self.s3control_client.update_job_status( - AccountId=account_id, - JobId=job_id, - RequestedJobStatus='Active' - ) - print(f"Successfully activated job {job_id}") - except ClientError as e: - print(f"Error activating job: {e}") - if 'Message' in str(e): - print(f"Detailed error message: {e.response.get('Message', '')}") - raise + print("The job priority was updated") + + # Try to activate the job after priority update + try: + self.s3control_client.update_job_status( + AccountId=account_id, + JobId=job_id, + RequestedJobStatus='Ready' + ) + print("Job activated successfully") + except ClientError as activation_error: + print(f"Note: Could not activate job automatically: {activation_error}") + print("Job priority was updated successfully. 
Job may need manual activation in the console.") + elif current_status in ['Active', 'Completing', 'Complete']: + print(f"Job is in '{current_status}' state - priority cannot be updated") + if current_status == 'Completing': + print("Job is finishing up and will complete soon.") + elif current_status == 'Complete': + print("Job has already completed successfully.") + else: + print("Job is currently running.") + else: + print(f"Job is in '{current_status}' state - priority update not allowed") + except ClientError as e: print(f"Error updating job priority: {e}") + # Don't raise the error to allow the scenario to continue + print("Continuing with the scenario...") + return + + def cancel_job(self, job_id, account_id): + """ + Cancel an S3 batch job. + + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + """ + try: + self.s3control_client.update_job_status( + AccountId=account_id, + JobId=job_id, + RequestedJobStatus='Cancelled' + ) + print(f"Job {job_id} was successfully canceled.") + except ClientError as e: + print(f"Error canceling job: {e}") + raise + + def describe_job_details(self, job_id, account_id): + """ + Describe detailed information about a batch job. + + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + """ + try: + response = self.s3control_client.describe_job( + AccountId=account_id, + JobId=job_id + ) + job = response['Job'] + print(f"Job ID: {job['JobId']}") + print(f"Description: {job.get('Description', 'N/A')}") + print(f"Status: {job['Status']}") + print(f"Role ARN: {job['RoleArn']}") + print(f"Priority: {job['Priority']}") + if 'ProgressSummary' in job: + progress = job['ProgressSummary'] + print(f"Progress Summary: Total={progress.get('TotalNumberOfTasks', 0)}, " + f"Succeeded={progress.get('NumberOfTasksSucceeded', 0)}, " + f"Failed={progress.get('NumberOfTasksFailed', 0)}") + except ClientError as e: + print(f"Error describing job: {e}") + raise + + def get_job_tags(self, job_id, account_id): + """ + Get tags associated with a batch job. + + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + """ + try: + response = self.s3control_client.get_job_tagging( + AccountId=account_id, + JobId=job_id + ) + tags = response.get('Tags', []) + if tags: + print(f"Tags for job {job_id}:") + for tag in tags: + print(f" {tag['Key']}: {tag['Value']}") + else: + print(f"No tags found for job ID: {job_id}") + except ClientError as e: + print(f"Error getting job tags: {e}") + raise + + def put_job_tags(self, job_id, account_id): + """ + Add tags to a batch job. + + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + """ + try: + self.s3control_client.put_job_tagging( + AccountId=account_id, + JobId=job_id, + Tags=[ + {'Key': 'Environment', 'Value': 'Development'}, + {'Key': 'Team', 'Value': 'DataProcessing'} + ] + ) + print(f"Additional tags were added to job {job_id}") + except ClientError as e: + print(f"Error adding job tags: {e}") + raise + + def list_jobs(self, account_id): + """ + List all batch jobs for the account. 
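+        Requests every job status so both in-flight and completed jobs are listed.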
+ + Args: + account_id (str): AWS account ID + """ + try: + response = self.s3control_client.list_jobs( + AccountId=account_id, + JobStatuses=['Active', 'Complete', 'Cancelled', 'Failed', 'New', 'Paused', 'Pausing', 'Preparing', 'Ready', 'Suspended'] + ) + jobs = response.get('Jobs', []) + for job in jobs: + print(f"The job id is {job['JobId']}") + print(f"The job priority is {job['Priority']}") + except ClientError as e: + print(f"Error listing jobs: {e}") + raise + + def delete_job_tags(self, job_id, account_id): + """ + Delete all tags from a batch job. + + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + """ + try: + self.s3control_client.delete_job_tagging( + AccountId=account_id, + JobId=job_id + ) + print(f"You have successfully deleted {job_id} tagging.") + except ClientError as e: + print(f"Error deleting job tags: {e}") raise def cleanup_resources(self, bucket_name, file_names): @@ -598,23 +731,48 @@ def main(): raise ValueError("Job failed, stopping execution") wait_for_input() - print("\n2. Checking job status...") - # Get current job status instead of trying to update priority - response = scenario.s3control_client.describe_job( - AccountId=account_id, - JobId=job_id - ) - current_status = response['Job']['Status'] - print(f"Current job status: {current_status}") + print("\n" + scenario.DASHES) + print("2. Update an existing S3 Batch Operations job's priority") + print("In this step, we modify the job priority value. The higher the number, the higher the priority.") + scenario.update_job_priority(job_id, account_id) - # Only try to update priority if job is not already active - if current_status not in ['Active', 'Complete']: - print("\nUpdating job priority...") - scenario.update_job_priority(job_id, account_id) + wait_for_input() + print("\n" + scenario.DASHES) + print("3. Cancel the S3 Batch job") + cancel_job = input("Do you want to cancel the Batch job? (y/n): ").lower() == 'y' + if cancel_job: + scenario.cancel_job(job_id, account_id) else: - print("Job is already active or complete, no need to update priority.") + print(f"Job {job_id} was not canceled.") + + wait_for_input() + print("\n" + scenario.DASHES) + print("4. Describe the job that was just created") + scenario.describe_job_details(job_id, account_id) + + wait_for_input() + print("\n" + scenario.DASHES) + print("5. Describe the tags associated with the job") + scenario.get_job_tags(job_id, account_id) + + wait_for_input() + print("\n" + scenario.DASHES) + print("6. Update Batch Job Tags") + scenario.put_job_tags(job_id, account_id) + + wait_for_input() + print("\n" + scenario.DASHES) + print("7. List Batch Jobs") + scenario.list_jobs(account_id) + + wait_for_input() + print("\n" + scenario.DASHES) + print("8. Delete the Amazon S3 Batch job tagging") + delete_tags = input("Do you want to delete Batch job tagging? (y/n): ").lower() == 'y' + if delete_tags: + scenario.delete_job_tags(job_id, account_id) - print("\nCleanup") + print("\n" + scenario.DASHES) if input( "Do you want to delete the AWS resources used in this scenario? 
(y/n): " ).lower() == 'y': From 9ad65c917af5bc4a02b860b703620f6ab2ee589c Mon Sep 17 00:00:00 2001 From: John Lwin Date: Mon, 4 Aug 2025 18:42:18 -0700 Subject: [PATCH 07/16] feat: add type hints and fix status message --- .../s3/scenarios/batch/s3_batch.py | 51 ++++++++++--------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/python/example_code/s3/scenarios/batch/s3_batch.py b/python/example_code/s3/scenarios/batch/s3_batch.py index 9acd3c53bb6..7c3a95520f8 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch.py +++ b/python/example_code/s3/scenarios/batch/s3_batch.py @@ -10,6 +10,7 @@ import json import time import uuid +from typing import Dict, List, Tuple, Optional, Any import boto3 from botocore.exceptions import ClientError, WaiterError @@ -17,7 +18,7 @@ class CloudFormationHelper: """Helper class for managing CloudFormation stack operations.""" # Change the value of 'region' to your preferred AWS Region. - def __init__(self, region_name='us-west-2'): + def __init__(self, region_name: str = 'us-west-2') -> None: """ Initialize CloudFormation helper. @@ -27,7 +28,7 @@ def __init__(self, region_name='us-west-2'): # Create a CloudFormation client for the specified region self.cfn_client = boto3.client('cloudformation', region_name=region_name) - def deploy_cloudformation_stack(self, stack_name): + def deploy_cloudformation_stack(self, stack_name: str) -> None: """ Deploy a CloudFormation stack with S3 batch operation permissions. @@ -104,7 +105,7 @@ def deploy_cloudformation_stack(self, stack_name): print(f"Error creating CloudFormation stack: {e}") raise - def get_stack_outputs(self, stack_name): + def get_stack_outputs(self, stack_name: str) -> Dict[str, str]: """ Get CloudFormation stack outputs. @@ -129,7 +130,7 @@ def get_stack_outputs(self, stack_name): print(f"Error getting stack outputs: {e}") raise - def destroy_cloudformation_stack(self, stack_name): + def destroy_cloudformation_stack(self, stack_name: str) -> None: """ Delete a CloudFormation stack. @@ -149,7 +150,7 @@ def destroy_cloudformation_stack(self, stack_name): print(f"Error deleting CloudFormation stack: {e}") raise - def _wait_for_stack_completion(self, stack_name, operation): + def _wait_for_stack_completion(self, stack_name: str, operation: str) -> None: """ Wait for CloudFormation stack operation to complete. @@ -179,7 +180,7 @@ class S3BatchScenario: DASHES = "-" * 80 STACK_NAME = "MyS3Stack" - def __init__(self, region_name='us-west-2'): + def __init__(self, region_name: str = 'us-west-2') -> None: """ Initialize S3 Batch Operations scenario. @@ -191,7 +192,7 @@ def __init__(self, region_name='us-west-2'): self.s3control_client = boto3.client('s3control', region_name=region_name) self.sts_client = boto3.client('sts', region_name=region_name) - def get_account_id(self): + def get_account_id(self) -> str: """ Get AWS account ID. @@ -200,7 +201,7 @@ def get_account_id(self): """ return self.sts_client.get_caller_identity()["Account"] - def create_bucket(self, bucket_name): + def create_bucket(self, bucket_name: str) -> None: """ Create an S3 bucket. @@ -225,7 +226,7 @@ def create_bucket(self, bucket_name): print(f"Error creating bucket: {e}") raise - def upload_files_to_bucket(self, bucket_name, file_names): + def upload_files_to_bucket(self, bucket_name: str, file_names: List[str]) -> str: """ Upload files to S3 bucket including manifest file. 
@@ -268,8 +269,8 @@ def upload_files_to_bucket(self, bucket_name, file_names): print(f"Error uploading files: {e}") raise - def create_s3_batch_job(self, account_id, role_arn, manifest_location, - report_bucket_name): + def create_s3_batch_job(self, account_id: str, role_arn: str, manifest_location: str, + report_bucket_name: str) -> str: """ Create an S3 batch operation job. @@ -341,7 +342,7 @@ def create_s3_batch_job(self, account_id, role_arn, manifest_location, if 'Message' in str(e): print(f"Detailed error message: {e.response['Message']}") raise - def check_job_failure_reasons(self, job_id, account_id): + def check_job_failure_reasons(self, job_id: str, account_id: str) -> List[Dict[str, Any]]: """ Check for any failure reasons of a batch job. @@ -368,7 +369,7 @@ def check_job_failure_reasons(self, job_id, account_id): print(f"Error checking job failure reasons: {e}") raise - def wait_for_job_ready(self, job_id, account_id, desired_status='Ready'): + def wait_for_job_ready(self, job_id: str, account_id: str, desired_status: str = 'Ready') -> bool: """ Wait for a job to reach the desired status. @@ -402,7 +403,7 @@ def wait_for_job_ready(self, job_id, account_id, desired_status='Ready'): print("Job is in Suspended state, can proceed with activation") return True if current_status in ['Active', 'Failed', 'Cancelled', 'Complete']: - print(f"Job is in {current_status} state, cannot update priority") + print(f"Job is in {current_status} state, cannot reach {desired_status} status") if 'FailureReasons' in response['Job']: print("Failure reasons:") for reason in response['Job']['FailureReasons']: @@ -417,7 +418,7 @@ def wait_for_job_ready(self, job_id, account_id, desired_status='Ready'): print(f"Timeout waiting for job to become {desired_status}") return False - def update_job_priority(self, job_id, account_id): + def update_job_priority(self, job_id: str, account_id: str) -> None: """ Update the priority of a batch job and start it. @@ -471,7 +472,7 @@ def update_job_priority(self, job_id, account_id): print("Continuing with the scenario...") return - def cancel_job(self, job_id, account_id): + def cancel_job(self, job_id: str, account_id: str) -> None: """ Cancel an S3 batch job. @@ -490,7 +491,7 @@ def cancel_job(self, job_id, account_id): print(f"Error canceling job: {e}") raise - def describe_job_details(self, job_id, account_id): + def describe_job_details(self, job_id: str, account_id: str) -> None: """ Describe detailed information about a batch job. @@ -518,7 +519,7 @@ def describe_job_details(self, job_id, account_id): print(f"Error describing job: {e}") raise - def get_job_tags(self, job_id, account_id): + def get_job_tags(self, job_id: str, account_id: str) -> None: """ Get tags associated with a batch job. @@ -542,7 +543,7 @@ def get_job_tags(self, job_id, account_id): print(f"Error getting job tags: {e}") raise - def put_job_tags(self, job_id, account_id): + def put_job_tags(self, job_id: str, account_id: str) -> None: """ Add tags to a batch job. @@ -564,7 +565,7 @@ def put_job_tags(self, job_id, account_id): print(f"Error adding job tags: {e}") raise - def list_jobs(self, account_id): + def list_jobs(self, account_id: str) -> None: """ List all batch jobs for the account. @@ -584,7 +585,7 @@ def list_jobs(self, account_id): print(f"Error listing jobs: {e}") raise - def delete_job_tags(self, job_id, account_id): + def delete_job_tags(self, job_id: str, account_id: str) -> None: """ Delete all tags from a batch job. 
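Editor's note: wait_for_job_ready, retyped above, is a fixed-interval poller; it calls describe_job every 20 seconds for up to 60 attempts and bails out early on terminal states. The same pattern factored into a standalone helper (the name, defaults, and terminal set here are illustrative, not part of the patch):

import time
from typing import Callable, Set


def poll_until(get_status: Callable[[], str], desired: str, terminal: Set[str],
               delay_seconds: float = 20.0, max_attempts: int = 60) -> bool:
    """Poll a status callable until it reports `desired`, a terminal state, or timeout."""
    for _ in range(max_attempts):
        status = get_status()
        if status == desired:
            return True
        if status in terminal:
            return False  # the job can no longer reach the desired state
        time.sleep(delay_seconds)
    return False

In the scenario, get_status would wrap a describe_job call and terminal would be a set such as {'Failed', 'Cancelled', 'Complete'}.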
@@ -602,7 +603,7 @@ def delete_job_tags(self, job_id, account_id): print(f"Error deleting job tags: {e}") raise - def cleanup_resources(self, bucket_name, file_names): + def cleanup_resources(self, bucket_name: str, file_names: List[str]) -> None: """ Clean up all resources created during the scenario. @@ -637,7 +638,7 @@ def cleanup_resources(self, bucket_name, file_names): raise -def wait_for_input(): +def wait_for_input() -> None: """ Wait for user input to continue. @@ -652,7 +653,7 @@ def wait_for_input(): print("Invalid input. Please try again.") -def setup_resources(scenario, bucket_name, file_names): +def setup_resources(scenario: S3BatchScenario, bucket_name: str, file_names: List[str]) -> Tuple[str, str]: """ Set up initial resources for the scenario. @@ -672,7 +673,7 @@ def setup_resources(scenario, bucket_name, file_names): return manifest_location, report_bucket_arn -def main(): +def main() -> None: """Main function to run the S3 Batch Operations scenario.""" region_name = 'us-west-2' scenario = S3BatchScenario(region_name) From 58aea5fe239c46a4341051a2378b715733f70a28 Mon Sep 17 00:00:00 2001 From: John Lwin Date: Tue, 5 Aug 2025 00:27:50 -0700 Subject: [PATCH 08/16] chore(metadata): add snippet tags and metadata for every operations --- .../s3/scenarios/batch/s3_batch.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/python/example_code/s3/scenarios/batch/s3_batch.py b/python/example_code/s3/scenarios/batch/s3_batch.py index 7c3a95520f8..0202cff350e 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch.py +++ b/python/example_code/s3/scenarios/batch/s3_batch.py @@ -15,6 +15,7 @@ import boto3 from botocore.exceptions import ClientError, WaiterError +# snippet-start:[python.example_code.s3control.CloudFormationHelper] class CloudFormationHelper: """Helper class for managing CloudFormation stack operations.""" # Change the value of 'region' to your preferred AWS Region. @@ -173,7 +174,9 @@ def _wait_for_stack_completion(self, stack_name: str, operation: str) -> None: except WaiterError as e: print(f"Error waiting for stack {operation}: {e}") raise +# snippet-end:[python.example_code.s3control.CloudFormationHelper] +# snippet-start:[python.example_code.s3control.helper.S3BatchScenario] class S3BatchScenario: """Class for managing S3 Batch Operations scenarios.""" @@ -269,6 +272,7 @@ def upload_files_to_bucket(self, bucket_name: str, file_names: List[str]) -> str print(f"Error uploading files: {e}") raise + # snippet-start:[python.example_code.s3control.create_job] def create_s3_batch_job(self, account_id: str, role_arn: str, manifest_location: str, report_bucket_name: str) -> str: """ @@ -342,6 +346,8 @@ def create_s3_batch_job(self, account_id: str, role_arn: str, manifest_location: if 'Message' in str(e): print(f"Detailed error message: {e.response['Message']}") raise + # snippet-end:[python.example_code.s3control.create_job] + def check_job_failure_reasons(self, job_id: str, account_id: str) -> List[Dict[str, Any]]: """ Check for any failure reasons of a batch job. @@ -418,6 +424,7 @@ def wait_for_job_ready(self, job_id: str, account_id: str, desired_status: str = print(f"Timeout waiting for job to become {desired_status}") return False + # snippet-start:[python.example_code.s3control.update_job_priority] def update_job_priority(self, job_id: str, account_id: str) -> None: """ Update the priority of a batch job and start it. 
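Editor's note: the snippet-start/snippet-end comments introduced in this patch mark regions that the repository's documentation tooling extracts by tag, so the markers must come in matching pairs with identical tags. The extractor itself is not part of this change; a rough sketch of the mechanism it implies:

import re
from pathlib import Path


def extract_snippet(path: Path, tag: str) -> str:
    """Return the code between matching snippet-start/snippet-end markers."""
    text = path.read_text()
    pattern = re.compile(
        rf"# snippet-start:\[{re.escape(tag)}\](.*?)# snippet-end:\[{re.escape(tag)}\]",
        re.DOTALL,
    )
    match = pattern.search(text)
    if match is None:
        raise KeyError(f"snippet tag not found or unbalanced: {tag}")
    return match.group(1)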
@@ -471,6 +478,7 @@ def update_job_priority(self, job_id: str, account_id: str) -> None: # Don't raise the error to allow the scenario to continue print("Continuing with the scenario...") return + # snippet-end:[python.example_code.s3control.update_job_priority] def cancel_job(self, job_id: str, account_id: str) -> None: """ @@ -491,6 +499,7 @@ def cancel_job(self, job_id: str, account_id: str) -> None: print(f"Error canceling job: {e}") raise + # snippet-start:[python.example_code.s3control.describe_job] def describe_job_details(self, job_id: str, account_id: str) -> None: """ Describe detailed information about a batch job. @@ -518,7 +527,9 @@ def describe_job_details(self, job_id: str, account_id: str) -> None: except ClientError as e: print(f"Error describing job: {e}") raise + # snippet-end:[python.example_code.s3control.describe_job] + # snippet-start:[python.example_code.s3control.get_job_tagging] def get_job_tags(self, job_id: str, account_id: str) -> None: """ Get tags associated with a batch job. @@ -542,7 +553,9 @@ def get_job_tags(self, job_id: str, account_id: str) -> None: except ClientError as e: print(f"Error getting job tags: {e}") raise + # snippet-end:[python.example_code.s3control.get_job_tagging] + # snippet-start:[python.example_code.s3control.put_job_tagging] def put_job_tags(self, job_id: str, account_id: str) -> None: """ Add tags to a batch job. @@ -564,7 +577,9 @@ def put_job_tags(self, job_id: str, account_id: str) -> None: except ClientError as e: print(f"Error adding job tags: {e}") raise + # snippet-end:[python.example_code.s3control.put_job_tagging] + # snippet-start:[python.example_code.s3control.list_jobs] def list_jobs(self, account_id: str) -> None: """ List all batch jobs for the account. @@ -584,7 +599,9 @@ def list_jobs(self, account_id: str) -> None: except ClientError as e: print(f"Error listing jobs: {e}") raise + # snippet-end:[python.example_code.s3control.list_jobs] + # snippet-start:[python.example_code.s3control.delete_job_tagging] def delete_job_tags(self, job_id: str, account_id: str) -> None: """ Delete all tags from a batch job. 
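Editor's note: the list_jobs wrapper tagged above prints only the first page of results; the ListJobs API also returns a NextToken when the account has more jobs than fit in one response. A hedged sketch of draining every page (statuses trimmed for brevity, account ID supplied by the caller):

import boto3


def list_all_jobs(account_id: str) -> list:
    """Follow NextToken until every matching job has been collected."""
    s3control = boto3.client("s3control")
    jobs, token = [], None
    while True:
        kwargs = {"AccountId": account_id, "JobStatuses": ["Active", "Complete"]}
        if token:
            kwargs["NextToken"] = token
        page = s3control.list_jobs(**kwargs)
        jobs.extend(page.get("Jobs", []))
        token = page.get("NextToken")
        if not token:
            return jobs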
@@ -602,6 +619,7 @@ def delete_job_tags(self, job_id: str, account_id: str) -> None: except ClientError as e: print(f"Error deleting job tags: {e}") raise + # snippet-end:[python.example_code.s3control.delete_job_tagging] def cleanup_resources(self, bucket_name: str, file_names: List[str]) -> None: """ @@ -636,6 +654,7 @@ def cleanup_resources(self, bucket_name: str, file_names: List[str]) -> None: except ClientError as e: print(f"Error in cleanup: {e}") raise +# snippet-end:[python.example_code.s3control.helper.S3BatchScenario] def wait_for_input() -> None: From c4f1072d90e26c8c49872f15d10f21219426a5c1 Mon Sep 17 00:00:00 2001 From: John Lwin Date: Tue, 5 Aug 2025 01:23:38 -0700 Subject: [PATCH 09/16] refactor: standardize user input handling with helper and update metadata.yaml --- .doc_gen/metadata/s3-control_metadata.yaml | 72 +++++++++++++++++++ .../s3/scenarios/batch/s3_batch.py | 28 ++++---- 2 files changed, 86 insertions(+), 14 deletions(-) diff --git a/.doc_gen/metadata/s3-control_metadata.yaml b/.doc_gen/metadata/s3-control_metadata.yaml index b302756a413..d7ffd4f54ed 100644 --- a/.doc_gen/metadata/s3-control_metadata.yaml +++ b/.doc_gen/metadata/s3-control_metadata.yaml @@ -13,6 +13,15 @@ s3-control_Hello: - description: snippet_tags: - s3control.java2.list_jobs.main + Python: + versions: + - sdk_version: 3 + github: python/example_code/s3/scenarios/batch + sdkguide: + excerpts: + - description: + snippet_tags: + - python.example_code.s3control.list_jobs services: s3-control: {ListJobs} @@ -36,6 +45,15 @@ s3-control_CreateJob: - description: Create a new governance retention job. snippet_tags: - s3.java2.create_governance_retemtion.main + Python: + versions: + - sdk_version: 3 + github: python/example_code/s3/scenarios/batch + sdkguide: + excerpts: + - description: + snippet_tags: + - python.example_code.s3control.create_job services: s3-control: {CreateJob} s3-control_PutJobTagging: @@ -49,6 +67,15 @@ s3-control_PutJobTagging: - description: snippet_tags: - s3control.java2.job.put.tags.main + Python: + versions: + - sdk_version: 3 + github: python/example_code/s3/scenarios/batch + sdkguide: + excerpts: + - description: + snippet_tags: + - python.example_code.s3control.put_job_tagging services: s3-control: {PutJobTagging} s3-control_DescribeJob: @@ -62,6 +89,15 @@ s3-control_DescribeJob: - description: snippet_tags: - s3control.java2.describe_job.main + Python: + versions: + - sdk_version: 3 + github: python/example_code/s3/scenarios/batch + sdkguide: + excerpts: + - description: + snippet_tags: + - python.example_code.s3control.describe_job services: s3-control: {DescribeJob} s3-control_DeleteJobTagging: @@ -75,6 +111,15 @@ s3-control_DeleteJobTagging: - description: snippet_tags: - s3control.java2.del_job_tagging.main + Python: + versions: + - sdk_version: 3 + github: python/example_code/s3/scenarios/batch + sdkguide: + excerpts: + - description: + snippet_tags: + - python.example_code.s3control.delete_job_tagging services: s3-control: {DeleteJobTagging} s3-control_GetJobTagging: @@ -88,6 +133,15 @@ s3-control_GetJobTagging: - description: snippet_tags: - s3control.java2.get_job_tagging.main + Python: + versions: + - sdk_version: 3 + github: python/example_code/s3/scenarios/batch + sdkguide: + excerpts: + - description: + snippet_tags: + - python.example_code.s3control.get_job_tagging services: s3-control: {GetJobTagging} s3-control_UpdateJobStatus: @@ -101,6 +155,15 @@ s3-control_UpdateJobStatus: - description: snippet_tags: - s3control.java2.cancel_job.main + Python: + 
versions: + - sdk_version: 3 + github: python/example_code/s3/scenarios/batch + sdkguide: + excerpts: + - description: + snippet_tags: + - python.example_code.s3control.update_job_status services: s3-control: {UpdateJobStatus} s3-control_UpdateJobPriority: @@ -114,6 +177,15 @@ s3-control_UpdateJobPriority: - description: snippet_tags: - s3control.java2.update_job.main + Python: + versions: + - sdk_version: 3 + github: python/example_code/s3/scenarios/batch + sdkguide: + excerpts: + - description: + snippet_tags: + - python.example_code.s3control.update_job_priority services: s3-control: {UpdateJobPriority} s3-control_Basics: diff --git a/python/example_code/s3/scenarios/batch/s3_batch.py b/python/example_code/s3/scenarios/batch/s3_batch.py index 0202cff350e..b9a71ed6a8a 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch.py +++ b/python/example_code/s3/scenarios/batch/s3_batch.py @@ -10,11 +10,15 @@ import json import time import uuid +import sys from typing import Dict, List, Tuple, Optional, Any import boto3 from botocore.exceptions import ClientError, WaiterError +sys.path.append("../../../..") +import demo_tools.question as q + # snippet-start:[python.example_code.s3control.CloudFormationHelper] class CloudFormationHelper: """Helper class for managing CloudFormation stack operations.""" @@ -664,12 +668,8 @@ def wait_for_input() -> None: Returns: None """ - while True: - user_input = input("\nEnter 'c' followed by to continue: ") - if user_input.lower() == 'c': - print("Continuing with the program...\n") - break - print("Invalid input. Please try again.") + q.ask("\nPress Enter to continue...") + print() def setup_resources(scenario: S3BatchScenario, bucket_name: str, file_names: List[str]) -> Tuple[str, str]: @@ -745,9 +745,9 @@ def main() -> None: failure_reasons = scenario.check_job_failure_reasons(job_id, account_id) if failure_reasons: print("\nJob failed. Please fix the issues and try again.") - if input( - "Do you want to proceed with the rest of the operations? (y/n): " - ).lower() != 'y': + if not q.ask( + "Do you want to proceed with the rest of the operations? (y/n): ", q.is_yesno + ): raise ValueError("Job failed, stopping execution") wait_for_input() @@ -759,7 +759,7 @@ def main() -> None: wait_for_input() print("\n" + scenario.DASHES) print("3. Cancel the S3 Batch job") - cancel_job = input("Do you want to cancel the Batch job? (y/n): ").lower() == 'y' + cancel_job = q.ask("Do you want to cancel the Batch job? (y/n): ", q.is_yesno) if cancel_job: scenario.cancel_job(job_id, account_id) else: @@ -788,14 +788,14 @@ def main() -> None: wait_for_input() print("\n" + scenario.DASHES) print("8. Delete the Amazon S3 Batch job tagging") - delete_tags = input("Do you want to delete Batch job tagging? (y/n): ").lower() == 'y' + delete_tags = q.ask("Do you want to delete Batch job tagging? (y/n): ", q.is_yesno) if delete_tags: scenario.delete_job_tags(job_id, account_id) print("\n" + scenario.DASHES) - if input( - "Do you want to delete the AWS resources used in this scenario? (y/n): " - ).lower() == 'y': + if q.ask( + "Do you want to delete the AWS resources used in this scenario? 
(y/n): ", q.is_yesno + ): scenario.cleanup_resources(bucket_name, file_names) cfn_helper.destroy_cloudformation_stack(scenario.STACK_NAME) From 0182aacd596891eeea5524df35c7e3c72e3d6d2d Mon Sep 17 00:00:00 2001 From: John Lwin Date: Thu, 7 Aug 2025 16:43:06 -0700 Subject: [PATCH 10/16] refactor: restructure classes to follow Python examples standards --- .../scenarios/batch/cloudformation_helper.py | 170 +++++++ .../s3/scenarios/batch/requirements.txt | 2 + .../s3/scenarios/batch/s3_batch_scenario.py | 195 +++++++ .../s3/scenarios/batch/s3_batch_wrapper.py | 478 ++++++++++++++++++ 4 files changed, 845 insertions(+) create mode 100644 python/example_code/s3/scenarios/batch/cloudformation_helper.py create mode 100644 python/example_code/s3/scenarios/batch/requirements.txt create mode 100644 python/example_code/s3/scenarios/batch/s3_batch_scenario.py create mode 100644 python/example_code/s3/scenarios/batch/s3_batch_wrapper.py diff --git a/python/example_code/s3/scenarios/batch/cloudformation_helper.py b/python/example_code/s3/scenarios/batch/cloudformation_helper.py new file mode 100644 index 00000000000..01334da5f79 --- /dev/null +++ b/python/example_code/s3/scenarios/batch/cloudformation_helper.py @@ -0,0 +1,170 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Helper class for managing CloudFormation stack operations for S3 Batch Operations. +""" + +import json +from typing import Dict + +import boto3 +from botocore.exceptions import ClientError, WaiterError + +# snippet-start:[python.example_code.s3control.CloudFormationHelper] +class CloudFormationHelper: + """Helper class for managing CloudFormation stack operations.""" + + def __init__(self, region_name: str = 'us-west-2') -> None: + """ + Initialize CloudFormation helper. + + Args: + region_name (str): AWS region name + """ + self.cfn_client = boto3.client('cloudformation', region_name=region_name) + + def deploy_cloudformation_stack(self, stack_name: str) -> None: + """ + Deploy a CloudFormation stack with S3 batch operation permissions. 
+ + Args: + stack_name (str): Name of the CloudFormation stack + + Raises: + ClientError: If stack creation fails + """ + try: + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "S3BatchRole": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "batchoperations.s3.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/AmazonS3FullAccess" + ], + "Policies": [ + { + "PolicyName": "S3BatchOperationsPolicy", + "PolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:*", + "s3-object-lambda:*" + ], + "Resource": "*" + } + ] + } + } + ] + } + } + }, + "Outputs": { + "S3BatchRoleArn": { + "Description": "ARN of IAM Role for S3 Batch Operations", + "Value": {"Fn::GetAtt": ["S3BatchRole", "Arn"]} + } + } + } + + self.cfn_client.create_stack( + StackName=stack_name, + TemplateBody=json.dumps(template), + Capabilities=['CAPABILITY_IAM'] + ) + + print(f"Creating stack {stack_name}...") + self._wait_for_stack_completion(stack_name, 'CREATE') + print(f"Stack {stack_name} created successfully") + + except ClientError as e: + print(f"Error creating CloudFormation stack: {e}") + raise + + def get_stack_outputs(self, stack_name: str) -> Dict[str, str]: + """ + Get CloudFormation stack outputs. + + Args: + stack_name (str): Name of the CloudFormation stack + + Returns: + dict: Stack outputs + + Raises: + ClientError: If getting stack outputs fails + """ + try: + response = self.cfn_client.describe_stacks(StackName=stack_name) + outputs = {} + if 'Stacks' in response and response['Stacks']: + for output in response['Stacks'][0].get('Outputs', []): + outputs[output['OutputKey']] = output['OutputValue'] + return outputs + + except ClientError as e: + print(f"Error getting stack outputs: {e}") + raise + + def destroy_cloudformation_stack(self, stack_name: str) -> None: + """ + Delete a CloudFormation stack. + + Args: + stack_name (str): Name of the CloudFormation stack + + Raises: + ClientError: If stack deletion fails + """ + try: + self.cfn_client.delete_stack(StackName=stack_name) + print(f"Deleting stack {stack_name}...") + self._wait_for_stack_completion(stack_name, 'DELETE') + print(f"Stack {stack_name} deleted successfully") + + except ClientError as e: + print(f"Error deleting CloudFormation stack: {e}") + raise + + def _wait_for_stack_completion(self, stack_name: str, operation: str) -> None: + """ + Wait for CloudFormation stack operation to complete. 
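Editor's note: the restructured template attaches both the AmazonS3FullAccess managed policy and an inline s3:*/s3-object-lambda:* statement, which keeps the demo friction-free but is far broader than the job requires. A narrower inline policy document covering just this tagging job's manifest reads, report writes, and tag updates might look like the following sketch (the bucket name is a placeholder, not from the patch):

# Hypothetical least-privilege alternative to the broad grant above.
SCOPED_POLICY = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "s3:GetObject",
                "s3:GetObjectTagging",
                "s3:PutObjectTagging",
                "s3:PutObject",
            ],
            "Resource": "arn:aws:s3:::demo-s3-batch-example/*",
        }
    ],
}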
+ + Args: + stack_name (str): Name of the CloudFormation stack + operation (str): Stack operation (CREATE or DELETE) + + Raises: + WaiterError: If waiting for stack completion fails + """ + try: + waiter = self.cfn_client.get_waiter( + 'stack_create_complete' if operation == 'CREATE' + else 'stack_delete_complete' + ) + waiter.wait( + StackName=stack_name, + WaiterConfig={'Delay': 5, 'MaxAttempts': 60} + ) + except WaiterError as e: + print(f"Error waiting for stack {operation}: {e}") + raise +# snippet-end:[python.example_code.s3control.CloudFormationHelper] \ No newline at end of file diff --git a/python/example_code/s3/scenarios/batch/requirements.txt b/python/example_code/s3/scenarios/batch/requirements.txt new file mode 100644 index 00000000000..2c9802951bf --- /dev/null +++ b/python/example_code/s3/scenarios/batch/requirements.txt @@ -0,0 +1,2 @@ +boto3>=1.26.0 +botocore>=1.29.0 \ No newline at end of file diff --git a/python/example_code/s3/scenarios/batch/s3_batch_scenario.py b/python/example_code/s3/scenarios/batch/s3_batch_scenario.py new file mode 100644 index 00000000000..2884ffc14a3 --- /dev/null +++ b/python/example_code/s3/scenarios/batch/s3_batch_scenario.py @@ -0,0 +1,195 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +S3 Batch Operations Scenario + +This scenario demonstrates how to use AWS S3 Batch Operations to perform large-scale +operations on S3 objects. The scenario includes the following steps: + +1. Create S3 Batch Job - Creates a batch job to tag objects +2. Update Job Priority - Modifies the job priority and activates the job +3. Cancel Job - Optionally cancels the batch job +4. Describe Job Details - Shows detailed information about the job +5. Get Job Tags - Retrieves tags associated with the job +6. Put Job Tags - Adds additional tags to the job +7. List Jobs - Lists all batch jobs for the account +8. Delete Job Tags - Removes tags from the job + +The scenario uses CloudFormation to create necessary IAM roles and demonstrates +proper resource cleanup at the end. +""" + +import time +import uuid +import sys +from typing import Tuple + +from cloudformation_helper import CloudFormationHelper +from s3_batch_wrapper import S3BatchWrapper +sys.path.append("../../../..") +import demo_tools.question as q + +# snippet-start:[python.example_code.s3control.helper.S3BatchScenario] +class S3BatchScenario: + """Manages the S3 Batch Operations scenario.""" + + DASHES = "-" * 80 + STACK_NAME = "MyS3Stack" + + def __init__(self, s3_batch_wrapper: S3BatchWrapper, cfn_helper: CloudFormationHelper) -> None: + """ + Initialize the S3 Batch scenario. + + Args: + s3_batch_wrapper: S3BatchWrapper instance + cfn_helper: CloudFormationHelper instance + """ + self.s3_batch_wrapper = s3_batch_wrapper + self.cfn_helper = cfn_helper + + def wait_for_input(self) -> None: + """Wait for user input to continue.""" + q.ask("\nPress Enter to continue...") + print() + + def setup_resources(self, bucket_name: str, file_names: list) -> Tuple[str, str]: + """ + Set up initial resources for the scenario. 
+ + Args: + bucket_name (str): Name of the bucket to create + file_names (list): List of files to upload + + Returns: + tuple: Manifest location and report bucket ARN + """ + print("\nSetting up required resources...") + self.s3_batch_wrapper.create_bucket(bucket_name) + report_bucket_arn = f"arn:aws:s3:::{bucket_name}" + manifest_location = f"arn:aws:s3:::{bucket_name}/job-manifest.csv" + self.s3_batch_wrapper.upload_files_to_bucket(bucket_name, file_names) + return manifest_location, report_bucket_arn + + def run_scenario(self) -> None: + """Run the S3 Batch Operations scenario.""" + account_id = self.s3_batch_wrapper.get_account_id() + bucket_name = f"demo-s3-batch-{str(uuid.uuid4())}" + file_names = [ + "job-manifest.csv", + "object-key-1.txt", + "object-key-2.txt", + "object-key-3.txt", + "object-key-4.txt" + ] + + print(self.DASHES) + print("Welcome to the Amazon S3 Batch basics scenario.") + print(""" + S3 Batch operations enables efficient and cost-effective processing of large-scale + data stored in Amazon S3. It automatically scales resources to handle varying workloads + without the need for manual intervention. + + This Python program walks you through Amazon S3 Batch operations. + """) + + try: + # Deploy CloudFormation stack for IAM roles + print("Deploying CloudFormation stack...") + self.cfn_helper.deploy_cloudformation_stack(self.STACK_NAME) + stack_outputs = self.cfn_helper.get_stack_outputs(self.STACK_NAME) + iam_role_arn = stack_outputs.get('S3BatchRoleArn') + + # Set up S3 bucket and upload test files + manifest_location, report_bucket_arn = self.setup_resources( + bucket_name, file_names + ) + + self.wait_for_input() + + print("\n1. Creating S3 Batch Job...") + job_id = self.s3_batch_wrapper.create_s3_batch_job( + account_id, + iam_role_arn, + manifest_location, + report_bucket_arn + ) + + time.sleep(5) + failure_reasons = self.s3_batch_wrapper.check_job_failure_reasons(job_id, account_id) + if failure_reasons: + print("\nJob failed. Please fix the issues and try again.") + if not q.ask( + "Do you want to proceed with the rest of the operations? (y/n): ", q.is_yesno + ): + raise ValueError("Job failed, stopping execution") + + self.wait_for_input() + print("\n" + self.DASHES) + print("2. Update an existing S3 Batch Operations job's priority") + print("In this step, we modify the job priority value. The higher the number, the higher the priority.") + self.s3_batch_wrapper.update_job_priority(job_id, account_id) + + self.wait_for_input() + print("\n" + self.DASHES) + print("3. Cancel the S3 Batch job") + cancel_job = q.ask("Do you want to cancel the Batch job? (y/n): ", q.is_yesno) + if cancel_job: + self.s3_batch_wrapper.cancel_job(job_id, account_id) + else: + print(f"Job {job_id} was not canceled.") + + self.wait_for_input() + print("\n" + self.DASHES) + print("4. Describe the job that was just created") + self.s3_batch_wrapper.describe_job_details(job_id, account_id) + + self.wait_for_input() + print("\n" + self.DASHES) + print("5. Describe the tags associated with the job") + self.s3_batch_wrapper.get_job_tags(job_id, account_id) + + self.wait_for_input() + print("\n" + self.DASHES) + print("6. Update Batch Job Tags") + self.s3_batch_wrapper.put_job_tags(job_id, account_id) + + self.wait_for_input() + print("\n" + self.DASHES) + print("7. List Batch Jobs") + self.s3_batch_wrapper.list_jobs(account_id) + + self.wait_for_input() + print("\n" + self.DASHES) + print("8. 
Delete the Amazon S3 Batch job tagging") + delete_tags = q.ask("Do you want to delete Batch job tagging? (y/n): ", q.is_yesno) + if delete_tags: + self.s3_batch_wrapper.delete_job_tags(job_id, account_id) + + print("\n" + self.DASHES) + if q.ask( + "Do you want to delete the AWS resources used in this scenario? (y/n): ", q.is_yesno + ): + self.s3_batch_wrapper.cleanup_resources(bucket_name, file_names) + self.cfn_helper.destroy_cloudformation_stack(self.STACK_NAME) + + except Exception as e: + print(f"An error occurred: {e}") + raise + + print("\nThe Amazon S3 Batch scenario has successfully completed.") + print(self.DASHES) +# snippet-end:[python.example_code.s3control.helper.S3BatchScenario] + +def main() -> None: + """Main function to run the S3 Batch Operations scenario.""" + region_name = 'us-west-2' + s3_batch_wrapper = S3BatchWrapper(region_name) + cfn_helper = CloudFormationHelper(region_name) + + scenario = S3BatchScenario(s3_batch_wrapper, cfn_helper) + scenario.run_scenario() + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py new file mode 100644 index 00000000000..b1817ab8c0b --- /dev/null +++ b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py @@ -0,0 +1,478 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Wrapper class for AWS S3 Batch Operations. +""" + +import time +from typing import Dict, List, Any + +import boto3 +from botocore.exceptions import ClientError + +# snippet-start:[python.example_code.s3control.helper.S3BatchScenario] +class S3BatchWrapper: + """Wrapper class for managing S3 Batch Operations.""" + + def __init__(self, region_name: str = 'us-west-2') -> None: + """ + Initialize S3 Batch Operations wrapper. + + Args: + region_name (str): AWS region name + """ + self.region_name = region_name + self.s3_client = boto3.client('s3', region_name=region_name) + self.s3control_client = boto3.client('s3control', region_name=region_name) + self.sts_client = boto3.client('sts', region_name=region_name) + + def get_account_id(self) -> str: + """ + Get AWS account ID. + + Returns: + str: AWS account ID + """ + return self.sts_client.get_caller_identity()["Account"] + + def create_bucket(self, bucket_name: str) -> None: + """ + Create an S3 bucket. + + Args: + bucket_name (str): Name of the bucket to create + + Raises: + ClientError: If bucket creation fails + """ + try: + if self.region_name != 'us-east-1': + self.s3_client.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={ + 'LocationConstraint': self.region_name + } + ) + else: + self.s3_client.create_bucket(Bucket=bucket_name) + print(f"Created bucket: {bucket_name}") + except ClientError as e: + print(f"Error creating bucket: {e}") + raise + + def upload_files_to_bucket(self, bucket_name: str, file_names: List[str]) -> str: + """ + Upload files to S3 bucket including manifest file. 
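Editor's note: the uploader whose docstring begins above writes the sample objects and then a CSV manifest with one Bucket,Key row per object, the layout the job later declares as S3BatchOperations_CSV_20180820. A small sketch of that manifest format (bucket and keys are placeholders):

def build_manifest(bucket_name: str, keys: list) -> str:
    """Render the two-column CSV manifest S3 Batch Operations expects."""
    return "".join(f"{bucket_name},{key}\n" for key in keys)


print(build_manifest("demo-s3-batch-example", ["object-key-1.txt", "object-key-2.txt"]))
# demo-s3-batch-example,object-key-1.txt
# demo-s3-batch-example,object-key-2.txt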
+ + Args: + bucket_name (str): Target bucket name + file_names (list): List of file names to upload + + Returns: + str: ETag of the manifest file + + Raises: + ClientError: If file upload fails + """ + try: + for file_name in file_names: + if file_name != "job-manifest.csv": + content = f"Content for {file_name}" + self.s3_client.put_object( + Bucket=bucket_name, + Key=file_name, + Body=content.encode('utf-8') + ) + print(f"Uploaded {file_name} to {bucket_name}") + + manifest_content = "" + for file_name in file_names: + if file_name != "job-manifest.csv": + manifest_content += f"{bucket_name},{file_name}\n" + + manifest_response = self.s3_client.put_object( + Bucket=bucket_name, + Key="job-manifest.csv", + Body=manifest_content.encode('utf-8') + ) + print(f"Uploaded manifest file to {bucket_name}") + print(f"Manifest content:\n{manifest_content}") + return manifest_response['ETag'].strip('"') + + except ClientError as e: + print(f"Error uploading files: {e}") + raise + + # snippet-start:[python.example_code.s3control.create_job] + def create_s3_batch_job(self, account_id: str, role_arn: str, manifest_location: str, + report_bucket_name: str) -> str: + """ + Create an S3 batch operation job. + + Args: + account_id (str): AWS account ID + role_arn (str): IAM role ARN for batch operations + manifest_location (str): Location of the manifest file + report_bucket_name (str): Bucket for job reports + + Returns: + str: Job ID + + Raises: + ClientError: If job creation fails + """ + try: + bucket_name = manifest_location.split(':::')[1].split('/')[0] + manifest_key = 'job-manifest.csv' + manifest_obj = self.s3_client.head_object( + Bucket=bucket_name, + Key=manifest_key + ) + etag = manifest_obj['ETag'].strip('"') + + response = self.s3control_client.create_job( + AccountId=account_id, + Operation={ + 'S3PutObjectTagging': { + 'TagSet': [ + { + 'Key': 'BatchTag', + 'Value': 'BatchValue' + }, + ] + } + }, + Report={ + 'Bucket': report_bucket_name, + 'Format': 'Report_CSV_20180820', + 'Enabled': True, + 'Prefix': 'batch-op-reports', + 'ReportScope': 'AllTasks' + }, + Manifest={ + 'Spec': { + 'Format': 'S3BatchOperations_CSV_20180820', + 'Fields': ['Bucket', 'Key'] + }, + 'Location': { + 'ObjectArn': manifest_location, + 'ETag': etag + } + }, + Priority=10, + RoleArn=role_arn, + Description='Batch job for tagging objects', + ConfirmationRequired=True + ) + job_id = response['JobId'] + print(f"The Job id is {job_id}") + return job_id + except ClientError as e: + print(f"Error creating batch job: {e}") + if 'Message' in str(e): + print(f"Detailed error message: {e.response['Message']}") + raise + # snippet-end:[python.example_code.s3control.create_job] + + def check_job_failure_reasons(self, job_id: str, account_id: str) -> List[Dict[str, Any]]: + """ + Check for any failure reasons of a batch job. + + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + + Returns: + list: List of failure reasons + + Raises: + ClientError: If checking job failure reasons fails + """ + try: + response = self.s3control_client.describe_job( + AccountId=account_id, + JobId=job_id + ) + if 'FailureReasons' in response['Job']: + for reason in response['Job']['FailureReasons']: + print(f"- {reason}") + return response['Job'].get('FailureReasons', []) + except ClientError as e: + print(f"Error checking job failure reasons: {e}") + raise + + def wait_for_job_ready(self, job_id: str, account_id: str, desired_status: str = 'Ready') -> bool: + """ + Wait for a job to reach the desired status. 
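Editor's note: create_s3_batch_job above recovers the bucket name by splitting the manifest ARN on ':::' and re-reads the object's ETag so the Manifest block can pin the exact manifest version. A slightly more defensive version of that ARN parsing, as a standalone sketch (the helper name is mine, not the patch's):

def bucket_and_key_from_arn(object_arn: str) -> tuple:
    """Split an S3 object ARN such as arn:aws:s3:::bucket/key into (bucket, key)."""
    prefix = "arn:aws:s3:::"
    if not object_arn.startswith(prefix):
        raise ValueError(f"not an S3 object ARN: {object_arn}")
    bucket, _, key = object_arn[len(prefix):].partition("/")
    return bucket, key


assert bucket_and_key_from_arn("arn:aws:s3:::my-bucket/job-manifest.csv") == (
    "my-bucket",
    "job-manifest.csv",
)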
+ + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + desired_status (str): Target status to wait for + + Returns: + bool: True if desired status is reached, False otherwise + + Raises: + ClientError: If checking job status fails + """ + print(f"Waiting for job to become {desired_status}...") + max_attempts = 60 + attempt = 0 + while attempt < max_attempts: + try: + response = self.s3control_client.describe_job( + AccountId=account_id, + JobId=job_id + ) + current_status = response['Job']['Status'] + print(f"Current job status: {current_status}") + if current_status == desired_status: + return True + if current_status == 'Suspended': + print("Job is in Suspended state, can proceed with activation") + return True + if current_status in ['Active', 'Failed', 'Cancelled', 'Complete']: + print(f"Job is in {current_status} state, cannot reach {desired_status} status") + if 'FailureReasons' in response['Job']: + print("Failure reasons:") + for reason in response['Job']['FailureReasons']: + print(f"- {reason}") + return False + + time.sleep(20) + attempt += 1 + except ClientError as e: + print(f"Error checking job status: {e}") + raise + print(f"Timeout waiting for job to become {desired_status}") + return False + + # snippet-start:[python.example_code.s3control.update_job_priority] + def update_job_priority(self, job_id: str, account_id: str) -> None: + """ + Update the priority of a batch job and start it. + + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + """ + try: + response = self.s3control_client.describe_job( + AccountId=account_id, + JobId=job_id + ) + current_status = response['Job']['Status'] + print(f"Current job status: {current_status}") + + if current_status in ['Ready', 'Suspended']: + self.s3control_client.update_job_priority( + AccountId=account_id, + JobId=job_id, + Priority=60 + ) + print("The job priority was updated") + + try: + self.s3control_client.update_job_status( + AccountId=account_id, + JobId=job_id, + RequestedJobStatus='Ready' + ) + print("Job activated successfully") + except ClientError as activation_error: + print(f"Note: Could not activate job automatically: {activation_error}") + print("Job priority was updated successfully. Job may need manual activation in the console.") + elif current_status in ['Active', 'Completing', 'Complete']: + print(f"Job is in '{current_status}' state - priority cannot be updated") + if current_status == 'Completing': + print("Job is finishing up and will complete soon.") + elif current_status == 'Complete': + print("Job has already completed successfully.") + else: + print("Job is currently running.") + else: + print(f"Job is in '{current_status}' state - priority update not allowed") + + except ClientError as e: + print(f"Error updating job priority: {e}") + print("Continuing with the scenario...") + return + # snippet-end:[python.example_code.s3control.update_job_priority] + + def cancel_job(self, job_id: str, account_id: str) -> None: + """ + Cancel an S3 batch job. 
+
+        Args:
+            job_id (str): ID of the batch job
+            account_id (str): AWS account ID
+        """
+        try:
+            self.s3control_client.update_job_status(
+                AccountId=account_id,
+                JobId=job_id,
+                RequestedJobStatus='Cancelled'
+            )
+            print(f"Job {job_id} was successfully canceled.")
+        except ClientError as e:
+            print(f"Error canceling job: {e}")
+            raise
+
+    # snippet-start:[python.example_code.s3control.describe_job]
+    def describe_job_details(self, job_id: str, account_id: str) -> None:
+        """
+        Describe detailed information about a batch job.
+
+        Args:
+            job_id (str): ID of the batch job
+            account_id (str): AWS account ID
+        """
+        try:
+            response = self.s3control_client.describe_job(
+                AccountId=account_id,
+                JobId=job_id
+            )
+            job = response['Job']
+            print(f"Job ID: {job['JobId']}")
+            print(f"Description: {job.get('Description', 'N/A')}")
+            print(f"Status: {job['Status']}")
+            print(f"Role ARN: {job['RoleArn']}")
+            print(f"Priority: {job['Priority']}")
+            if 'ProgressSummary' in job:
+                progress = job['ProgressSummary']
+                print(f"Progress Summary: Total={progress.get('TotalNumberOfTasks', 0)}, "
+                      f"Succeeded={progress.get('NumberOfTasksSucceeded', 0)}, "
+                      f"Failed={progress.get('NumberOfTasksFailed', 0)}")
+        except ClientError as e:
+            print(f"Error describing job: {e}")
+            raise
+    # snippet-end:[python.example_code.s3control.describe_job]
+
+    # snippet-start:[python.example_code.s3control.get_job_tagging]
+    def get_job_tags(self, job_id: str, account_id: str) -> None:
+        """
+        Get tags associated with a batch job.
+
+        Args:
+            job_id (str): ID of the batch job
+            account_id (str): AWS account ID
+        """
+        try:
+            response = self.s3control_client.get_job_tagging(
+                AccountId=account_id,
+                JobId=job_id
+            )
+            tags = response.get('Tags', [])
+            if tags:
+                print(f"Tags for job {job_id}:")
+                for tag in tags:
+                    print(f"  {tag['Key']}: {tag['Value']}")
+            else:
+                print(f"No tags found for job ID: {job_id}")
+        except ClientError as e:
+            print(f"Error getting job tags: {e}")
+            raise
+    # snippet-end:[python.example_code.s3control.get_job_tagging]
+
+    # snippet-start:[python.example_code.s3control.put_job_tagging]
+    def put_job_tags(self, job_id: str, account_id: str) -> None:
+        """
+        Add tags to a batch job.
+
+        Args:
+            job_id (str): ID of the batch job
+            account_id (str): AWS account ID
+        """
+        try:
+            self.s3control_client.put_job_tagging(
+                AccountId=account_id,
+                JobId=job_id,
+                Tags=[
+                    {'Key': 'Environment', 'Value': 'Development'},
+                    {'Key': 'Team', 'Value': 'DataProcessing'}
+                ]
+            )
+            print(f"Additional tags were added to job {job_id}")
+        except ClientError as e:
+            print(f"Error adding job tags: {e}")
+            raise
+    # snippet-end:[python.example_code.s3control.put_job_tagging]
+
+    # snippet-start:[python.example_code.s3control.list_jobs]
+    def list_jobs(self, account_id: str) -> None:
+        """
+        List all batch jobs for the account.
+
+        Args:
+            account_id (str): AWS account ID
+        """
+        try:
+            response = self.s3control_client.list_jobs(
+                AccountId=account_id,
+                JobStatuses=['Active', 'Complete', 'Cancelled', 'Failed', 'New', 'Paused', 'Pausing', 'Preparing', 'Ready', 'Suspended']
+            )
+            jobs = response.get('Jobs', [])
+            for job in jobs:
+                print(f"The job id is {job['JobId']}")
+                print(f"The job priority is {job['Priority']}")
+        except ClientError as e:
+            print(f"Error listing jobs: {e}")
+            raise
+    # snippet-end:[python.example_code.s3control.list_jobs]
+
+    # snippet-start:[python.example_code.s3control.delete_job_tagging]
+    def delete_job_tags(self, job_id: str, account_id: str) -> None:
+        """
+        Delete all tags from a batch job. 
+ + Args: + job_id (str): ID of the batch job + account_id (str): AWS account ID + """ + try: + self.s3control_client.delete_job_tagging( + AccountId=account_id, + JobId=job_id + ) + print(f"You have successfully deleted {job_id} tagging.") + except ClientError as e: + print(f"Error deleting job tags: {e}") + raise + # snippet-end:[python.example_code.s3control.delete_job_tagging] + + def cleanup_resources(self, bucket_name: str, file_names: List[str]) -> None: + """ + Clean up all resources created during the scenario. + + Args: + bucket_name (str): Name of the bucket to clean up + file_names (list): List of files to delete + + Raises: + ClientError: If cleanup fails + """ + try: + for file_name in file_names: + self.s3_client.delete_object(Bucket=bucket_name, Key=file_name) + print(f"Deleted {file_name}") + + response = self.s3_client.list_objects_v2( + Bucket=bucket_name, + Prefix='batch-op-reports/' + ) + if 'Contents' in response: + for obj in response['Contents']: + self.s3_client.delete_object( + Bucket=bucket_name, + Key=obj['Key'] + ) + print(f"Deleted {obj['Key']}") + + self.s3_client.delete_bucket(Bucket=bucket_name) + print(f"Deleted bucket {bucket_name}") + except ClientError as e: + print(f"Error in cleanup: {e}") + raise +# snippet-end:[python.example_code.s3control.helper.S3BatchScenario] \ No newline at end of file From b48cac03561a7f4ee645c666217cc11d77a6a1f9 Mon Sep 17 00:00:00 2001 From: John Lwin Date: Fri, 8 Aug 2025 18:40:07 -0700 Subject: [PATCH 11/16] refactor: remove hard-coded region and use default config --- .../scenarios/batch/cloudformation_helper.py | 10 +++++----- .../s3/scenarios/batch/s3_batch_scenario.py | 12 ++++++++---- .../s3/scenarios/batch/s3_batch_wrapper.py | 19 ++++++++++--------- 3 files changed, 23 insertions(+), 18 deletions(-) diff --git a/python/example_code/s3/scenarios/batch/cloudformation_helper.py b/python/example_code/s3/scenarios/batch/cloudformation_helper.py index 01334da5f79..486e24dfd6f 100644 --- a/python/example_code/s3/scenarios/batch/cloudformation_helper.py +++ b/python/example_code/s3/scenarios/batch/cloudformation_helper.py @@ -15,14 +15,14 @@ class CloudFormationHelper: """Helper class for managing CloudFormation stack operations.""" - def __init__(self, region_name: str = 'us-west-2') -> None: + def __init__(self) -> None: """ Initialize CloudFormation helper. - - Args: - region_name (str): AWS region name + + This example uses the default settings specified in your shared credentials + and config files. """ - self.cfn_client = boto3.client('cloudformation', region_name=region_name) + self.cfn_client = boto3.client('cloudformation') def deploy_cloudformation_stack(self, stack_name: str) -> None: """ diff --git a/python/example_code/s3/scenarios/batch/s3_batch_scenario.py b/python/example_code/s3/scenarios/batch/s3_batch_scenario.py index 2884ffc14a3..efd34e33947 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch_scenario.py +++ b/python/example_code/s3/scenarios/batch/s3_batch_scenario.py @@ -182,10 +182,14 @@ def run_scenario(self) -> None: # snippet-end:[python.example_code.s3control.helper.S3BatchScenario] def main() -> None: - """Main function to run the S3 Batch Operations scenario.""" - region_name = 'us-west-2' - s3_batch_wrapper = S3BatchWrapper(region_name) - cfn_helper = CloudFormationHelper(region_name) + """ + Main function to run the S3 Batch Operations scenario. + + This example uses the default settings specified in your shared credentials + and config files. 
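Editor's note: cleanup_resources above fetches the batch-op-reports listing with a single list_objects_v2 call, which returns at most one page of keys; a job that produced more report objects would leave stragglers behind and make the bucket delete fail. A hedged sketch of the same cleanup using a paginator (the helper name is mine):

import boto3


def delete_prefix(bucket_name: str, prefix: str) -> None:
    """Delete every object under a prefix, following continuation pages."""
    s3 = boto3.client("s3")
    for page in s3.get_paginator("list_objects_v2").paginate(
        Bucket=bucket_name, Prefix=prefix
    ):
        for obj in page.get("Contents", []):
            s3.delete_object(Bucket=bucket_name, Key=obj["Key"])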
+ """ + s3_batch_wrapper = S3BatchWrapper() + cfn_helper = CloudFormationHelper() scenario = S3BatchScenario(s3_batch_wrapper, cfn_helper) scenario.run_scenario() diff --git a/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py index b1817ab8c0b..307a2d68fef 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py +++ b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py @@ -15,17 +15,18 @@ class S3BatchWrapper: """Wrapper class for managing S3 Batch Operations.""" - def __init__(self, region_name: str = 'us-west-2') -> None: + def __init__(self) -> None: """ Initialize S3 Batch Operations wrapper. - - Args: - region_name (str): AWS region name + + This example uses the default settings specified in your shared credentials + and config files. """ - self.region_name = region_name - self.s3_client = boto3.client('s3', region_name=region_name) - self.s3control_client = boto3.client('s3control', region_name=region_name) - self.sts_client = boto3.client('sts', region_name=region_name) + self.s3_client = boto3.client('s3') + self.s3control_client = boto3.client('s3control') + self.sts_client = boto3.client('sts') + # Get region from the client for bucket creation logic + self.region_name = self.s3_client.meta.region_name def get_account_id(self) -> str: """ @@ -47,7 +48,7 @@ def create_bucket(self, bucket_name: str) -> None: ClientError: If bucket creation fails """ try: - if self.region_name != 'us-east-1': + if self.region_name and self.region_name != 'us-east-1': self.s3_client.create_bucket( Bucket=bucket_name, CreateBucketConfiguration={ From df897d3aecb3867c02a6795a7967d645f4d317aa Mon Sep 17 00:00:00 2001 From: John Lwin Date: Wed, 13 Aug 2025 02:32:51 -0700 Subject: [PATCH 12/16] refactor: use client injection pattern to follow Python examples standards --- .../scenarios/batch/cloudformation_helper.py | 12 ++++++------ .../s3/scenarios/batch/s3_batch_scenario.py | 10 ++++++++-- .../s3/scenarios/batch/s3_batch_wrapper.py | 18 +++++++++++------- 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/python/example_code/s3/scenarios/batch/cloudformation_helper.py b/python/example_code/s3/scenarios/batch/cloudformation_helper.py index 486e24dfd6f..7c1221c70aa 100644 --- a/python/example_code/s3/scenarios/batch/cloudformation_helper.py +++ b/python/example_code/s3/scenarios/batch/cloudformation_helper.py @@ -6,7 +6,7 @@ """ import json -from typing import Dict +from typing import Dict, Any import boto3 from botocore.exceptions import ClientError, WaiterError @@ -15,14 +15,14 @@ class CloudFormationHelper: """Helper class for managing CloudFormation stack operations.""" - def __init__(self) -> None: + def __init__(self, cfn_client: Any) -> None: """ - Initialize CloudFormation helper. + Initializes the CloudFormationHelper with a CloudFormation client. - This example uses the default settings specified in your shared credentials - and config files. + :param cfn_client: A Boto3 Amazon CloudFormation client. This client provides + low-level access to AWS CloudFormation services. 
""" - self.cfn_client = boto3.client('cloudformation') + self.cfn_client = cfn_client def deploy_cloudformation_stack(self, stack_name: str) -> None: """ diff --git a/python/example_code/s3/scenarios/batch/s3_batch_scenario.py b/python/example_code/s3/scenarios/batch/s3_batch_scenario.py index efd34e33947..70af2554428 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch_scenario.py +++ b/python/example_code/s3/scenarios/batch/s3_batch_scenario.py @@ -25,6 +25,7 @@ import sys from typing import Tuple +import boto3 from cloudformation_helper import CloudFormationHelper from s3_batch_wrapper import S3BatchWrapper sys.path.append("../../../..") @@ -188,8 +189,13 @@ def main() -> None: This example uses the default settings specified in your shared credentials and config files. """ - s3_batch_wrapper = S3BatchWrapper() - cfn_helper = CloudFormationHelper() + s3_client = boto3.client('s3') + s3control_client = boto3.client('s3control') + sts_client = boto3.client('sts') + cfn_client = boto3.client('cloudformation') + + s3_batch_wrapper = S3BatchWrapper(s3_client, s3control_client, sts_client) + cfn_helper = CloudFormationHelper(cfn_client) scenario = S3BatchScenario(s3_batch_wrapper, cfn_helper) scenario.run_scenario() diff --git a/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py index 307a2d68fef..c055801fdbc 100644 --- a/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py +++ b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py @@ -15,16 +15,20 @@ class S3BatchWrapper: """Wrapper class for managing S3 Batch Operations.""" - def __init__(self) -> None: + def __init__(self, s3_client: Any, s3control_client: Any, sts_client: Any) -> None: """ - Initialize S3 Batch Operations wrapper. + Initializes the S3BatchWrapper with AWS service clients. - This example uses the default settings specified in your shared credentials - and config files. + :param s3_client: A Boto3 Amazon S3 client. This client provides low-level + access to AWS S3 services. + :param s3control_client: A Boto3 Amazon S3 Control client. This client provides + low-level access to AWS S3 Control services. + :param sts_client: A Boto3 AWS STS client. This client provides low-level + access to AWS STS services. 
""" - self.s3_client = boto3.client('s3') - self.s3control_client = boto3.client('s3control') - self.sts_client = boto3.client('sts') + self.s3_client = s3_client + self.s3control_client = s3control_client + self.sts_client = sts_client # Get region from the client for bucket creation logic self.region_name = self.s3_client.meta.region_name From 755260851424a7494e9b5d8e29cf483389007d61 Mon Sep 17 00:00:00 2001 From: John Lwin Date: Wed, 13 Aug 2025 03:05:11 -0700 Subject: [PATCH 13/16] test: add service method stubbing pattern to follow Python Examples Standards --- .../s3/scenarios/batch/s3_batch.py | 812 ------------------ .../s3/scenarios/batch/test/conftest.py | 36 + .../s3/scenarios/batch/test/test_s3_batch.py | 352 -------- .../batch/test/test_s3_batch_stubbed.py | 218 +++++ .../batch/test/test_s3_batch_stubber.py | 190 ++++ 5 files changed, 444 insertions(+), 1164 deletions(-) delete mode 100644 python/example_code/s3/scenarios/batch/s3_batch.py create mode 100644 python/example_code/s3/scenarios/batch/test/conftest.py delete mode 100644 python/example_code/s3/scenarios/batch/test/test_s3_batch.py create mode 100644 python/example_code/s3/scenarios/batch/test/test_s3_batch_stubbed.py create mode 100644 python/example_code/s3/scenarios/batch/test/test_s3_batch_stubber.py diff --git a/python/example_code/s3/scenarios/batch/s3_batch.py b/python/example_code/s3/scenarios/batch/s3_batch.py deleted file mode 100644 index b9a71ed6a8a..00000000000 --- a/python/example_code/s3/scenarios/batch/s3_batch.py +++ /dev/null @@ -1,812 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -# snippet-start:[python.example_code.s3control.Batch.scenario] -""" -This module provides functionality for AWS S3 Batch Operations. -It includes classes for managing CloudFormation stacks and S3 batch scenarios. -""" - -import json -import time -import uuid -import sys -from typing import Dict, List, Tuple, Optional, Any - -import boto3 -from botocore.exceptions import ClientError, WaiterError - -sys.path.append("../../../..") -import demo_tools.question as q - -# snippet-start:[python.example_code.s3control.CloudFormationHelper] -class CloudFormationHelper: - """Helper class for managing CloudFormation stack operations.""" - # Change the value of 'region' to your preferred AWS Region. - def __init__(self, region_name: str = 'us-west-2') -> None: - """ - Initialize CloudFormation helper. - - Args: - region_name (str): AWS region name - """ - # Create a CloudFormation client for the specified region - self.cfn_client = boto3.client('cloudformation', region_name=region_name) - - def deploy_cloudformation_stack(self, stack_name: str) -> None: - """ - Deploy a CloudFormation stack with S3 batch operation permissions. 
- - Args: - stack_name (str): Name of the CloudFormation stack - - Raises: - ClientError: If stack creation fails - """ - try: - # Define the CloudFormation template - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "S3BatchRole": { - "Type": "AWS::IAM::Role", - "Properties": { - "AssumeRolePolicyDocument": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": - "batchoperations.s3.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] - }, - "ManagedPolicyArns": [ - "arn:aws:iam::aws:policy/AmazonS3FullAccess" - ], - "Policies": [ - { - "PolicyName": "S3BatchOperationsPolicy", - "PolicyDocument": { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*", - "s3-object-lambda:*" - ], - "Resource": "*" - } - ] - } - } - ] - } - } - }, - "Outputs": { - "S3BatchRoleArn": { - "Description": "ARN of IAM Role for S3 Batch Operations", - "Value": {"Fn::GetAtt": ["S3BatchRole", "Arn"]} - } - } - } - - self.cfn_client.create_stack( - StackName=stack_name, - TemplateBody=json.dumps(template), - Capabilities=['CAPABILITY_IAM'] - ) - - print(f"Creating stack {stack_name}...") - self._wait_for_stack_completion(stack_name, 'CREATE') - print(f"Stack {stack_name} created successfully") - - except ClientError as e: - print(f"Error creating CloudFormation stack: {e}") - raise - - def get_stack_outputs(self, stack_name: str) -> Dict[str, str]: - """ - Get CloudFormation stack outputs. - - Args: - stack_name (str): Name of the CloudFormation stack - - Returns: - dict: Stack outputs - - Raises: - ClientError: If getting stack outputs fails - """ - try: - response = self.cfn_client.describe_stacks(StackName=stack_name) - outputs = {} - if 'Stacks' in response and response['Stacks']: - for output in response['Stacks'][0].get('Outputs', []): - outputs[output['OutputKey']] = output['OutputValue'] - return outputs - - except ClientError as e: - print(f"Error getting stack outputs: {e}") - raise - - def destroy_cloudformation_stack(self, stack_name: str) -> None: - """ - Delete a CloudFormation stack. - - Args: - stack_name (str): Name of the CloudFormation stack - - Raises: - ClientError: If stack deletion fails - """ - try: - self.cfn_client.delete_stack(StackName=stack_name) - print(f"Deleting stack {stack_name}...") - self._wait_for_stack_completion(stack_name, 'DELETE') - print(f"Stack {stack_name} deleted successfully") - - except ClientError as e: - print(f"Error deleting CloudFormation stack: {e}") - raise - - def _wait_for_stack_completion(self, stack_name: str, operation: str) -> None: - """ - Wait for CloudFormation stack operation to complete. 
- - Args: - stack_name (str): Name of the CloudFormation stack - operation (str): Stack operation (CREATE or DELETE) - - Raises: - WaiterError: If waiting for stack completion fails - """ - try: - waiter = self.cfn_client.get_waiter( - 'stack_create_complete' if operation == 'CREATE' - else 'stack_delete_complete' - ) - waiter.wait( - StackName=stack_name, - WaiterConfig={'Delay': 5, 'MaxAttempts': 60} - ) - except WaiterError as e: - print(f"Error waiting for stack {operation}: {e}") - raise -# snippet-end:[python.example_code.s3control.CloudFormationHelper] - -# snippet-start:[python.example_code.s3control.helper.S3BatchScenario] -class S3BatchScenario: - """Class for managing S3 Batch Operations scenarios.""" - - DASHES = "-" * 80 - STACK_NAME = "MyS3Stack" - - def __init__(self, region_name: str = 'us-west-2') -> None: - """ - Initialize S3 Batch Operations scenario. - - Args: - region_name (str): AWS region name - """ - self.region_name = region_name - self.s3_client = boto3.client('s3', region_name=region_name) - self.s3control_client = boto3.client('s3control', region_name=region_name) - self.sts_client = boto3.client('sts', region_name=region_name) - - def get_account_id(self) -> str: - """ - Get AWS account ID. - - Returns: - str: AWS account ID - """ - return self.sts_client.get_caller_identity()["Account"] - - def create_bucket(self, bucket_name: str) -> None: - """ - Create an S3 bucket. - - Args: - bucket_name (str): Name of the bucket to create - - Raises: - ClientError: If bucket creation fails - """ - try: - if self.region_name != 'us-east-1': - self.s3_client.create_bucket( - Bucket=bucket_name, - CreateBucketConfiguration={ - 'LocationConstraint': self.region_name - } - ) - else: - self.s3_client.create_bucket(Bucket=bucket_name) - print(f"Created bucket: {bucket_name}") - except ClientError as e: - print(f"Error creating bucket: {e}") - raise - - def upload_files_to_bucket(self, bucket_name: str, file_names: List[str]) -> str: - """ - Upload files to S3 bucket including manifest file. - - Args: - bucket_name (str): Target bucket name - file_names (list): List of file names to upload - - Returns: - str: ETag of the manifest file - - Raises: - ClientError: If file upload fails - """ - try: - for file_name in file_names: - if file_name != "job-manifest.csv": - content = f"Content for {file_name}" - self.s3_client.put_object( - Bucket=bucket_name, - Key=file_name, - Body=content.encode('utf-8') - ) - print(f"Uploaded {file_name} to {bucket_name}") - - manifest_content = "" - for file_name in file_names: - if file_name != "job-manifest.csv": - manifest_content += f"{bucket_name},{file_name}\n" - - manifest_response = self.s3_client.put_object( - Bucket=bucket_name, - Key="job-manifest.csv", - Body=manifest_content.encode('utf-8') - ) - print(f"Uploaded manifest file to {bucket_name}") - print(f"Manifest content:\n{manifest_content}") - return manifest_response['ETag'].strip('"') - - except ClientError as e: - print(f"Error uploading files: {e}") - raise - - # snippet-start:[python.example_code.s3control.create_job] - def create_s3_batch_job(self, account_id: str, role_arn: str, manifest_location: str, - report_bucket_name: str) -> str: - """ - Create an S3 batch operation job. 
- - Args: - account_id (str): AWS account ID - role_arn (str): IAM role ARN for batch operations - manifest_location (str): Location of the manifest file - report_bucket_name (str): Bucket for job reports - - Returns: - str: Job ID - - Raises: - ClientError: If job creation fails - """ - try: - # Extract bucket name from manifest location - bucket_name = manifest_location.split(':::')[1].split('/')[0] - manifest_key = 'job-manifest.csv' - # Get the ETag of the manifest file for verification - manifest_obj = self.s3_client.head_object( - Bucket=bucket_name, - Key=manifest_key - ) - etag = manifest_obj['ETag'].strip('"') - # Create the batch job with specified parameters - response = self.s3control_client.create_job( - AccountId=account_id, - # Define the operation (in this case, adding tags to objects) - Operation={ - 'S3PutObjectTagging': { - 'TagSet': [ - { - 'Key': 'BatchTag', - 'Value': 'BatchValue' - }, - ] - } - }, - # Configure job completion report settings - Report={ - 'Bucket': report_bucket_name, - 'Format': 'Report_CSV_20180820', - 'Enabled': True, - 'Prefix': 'batch-op-reports', - 'ReportScope': 'AllTasks' - }, - Manifest={ - 'Spec': { - 'Format': 'S3BatchOperations_CSV_20180820', - 'Fields': ['Bucket', 'Key'] - }, - 'Location': { - 'ObjectArn': manifest_location, - 'ETag': etag - } - }, - Priority=10, - RoleArn=role_arn, - Description='Batch job for tagging objects', - # Set to True so job starts in Suspended state for demonstration - ConfirmationRequired=True - ) - job_id = response['JobId'] - print(f"The Job id is {job_id}") - return job_id - except ClientError as e: - print(f"Error creating batch job: {e}") - if 'Message' in str(e): - print(f"Detailed error message: {e.response['Message']}") - raise - # snippet-end:[python.example_code.s3control.create_job] - - def check_job_failure_reasons(self, job_id: str, account_id: str) -> List[Dict[str, Any]]: - """ - Check for any failure reasons of a batch job. - - Args: - job_id (str): ID of the batch job - account_id (str): AWS account ID - - Returns: - list: List of failure reasons - - Raises: - ClientError: If checking job failure reasons fails - """ - try: - response = self.s3control_client.describe_job( - AccountId=account_id, - JobId=job_id - ) - if 'FailureReasons' in response['Job']: - for reason in response['Job']['FailureReasons']: - print(f"- {reason}") - return response['Job'].get('FailureReasons', []) - except ClientError as e: - print(f"Error checking job failure reasons: {e}") - raise - - def wait_for_job_ready(self, job_id: str, account_id: str, desired_status: str = 'Ready') -> bool: - """ - Wait for a job to reach the desired status. 
- - Args: - job_id (str): ID of the batch job - account_id (str): AWS account ID - desired_status (str): Target status to wait for - - Returns: - bool: True if desired status is reached, False otherwise - - Raises: - ClientError: If checking job status fails - """ - print(f"Waiting for job to become {desired_status}...") - max_attempts = 60 - attempt = 0 - while attempt < max_attempts: - try: - response = self.s3control_client.describe_job( - AccountId=account_id, - JobId=job_id - ) - current_status = response['Job']['Status'] - print(f"Current job status: {current_status}") - if current_status == desired_status: - return True - # For jobs with ConfirmationRequired=True, they start in Suspended state - # and need to be activated - if current_status == 'Suspended': - print("Job is in Suspended state, can proceed with activation") - return True - if current_status in ['Active', 'Failed', 'Cancelled', 'Complete']: - print(f"Job is in {current_status} state, cannot reach {desired_status} status") - if 'FailureReasons' in response['Job']: - print("Failure reasons:") - for reason in response['Job']['FailureReasons']: - print(f"- {reason}") - return False - - time.sleep(20) - attempt += 1 - except ClientError as e: - print(f"Error checking job status: {e}") - raise - print(f"Timeout waiting for job to become {desired_status}") - return False - - # snippet-start:[python.example_code.s3control.update_job_priority] - def update_job_priority(self, job_id: str, account_id: str) -> None: - """ - Update the priority of a batch job and start it. - - Args: - job_id (str): ID of the batch job - account_id (str): AWS account ID - """ - try: - # Check current job status - response = self.s3control_client.describe_job( - AccountId=account_id, - JobId=job_id - ) - current_status = response['Job']['Status'] - print(f"Current job status: {current_status}") - - # Only update priority if job is in a state that allows it - if current_status in ['Ready', 'Suspended']: - self.s3control_client.update_job_priority( - AccountId=account_id, - JobId=job_id, - Priority=60 - ) - print("The job priority was updated") - - # Try to activate the job after priority update - try: - self.s3control_client.update_job_status( - AccountId=account_id, - JobId=job_id, - RequestedJobStatus='Ready' - ) - print("Job activated successfully") - except ClientError as activation_error: - print(f"Note: Could not activate job automatically: {activation_error}") - print("Job priority was updated successfully. Job may need manual activation in the console.") - elif current_status in ['Active', 'Completing', 'Complete']: - print(f"Job is in '{current_status}' state - priority cannot be updated") - if current_status == 'Completing': - print("Job is finishing up and will complete soon.") - elif current_status == 'Complete': - print("Job has already completed successfully.") - else: - print("Job is currently running.") - else: - print(f"Job is in '{current_status}' state - priority update not allowed") - - except ClientError as e: - print(f"Error updating job priority: {e}") - # Don't raise the error to allow the scenario to continue - print("Continuing with the scenario...") - return - # snippet-end:[python.example_code.s3control.update_job_priority] - - def cancel_job(self, job_id: str, account_id: str) -> None: - """ - Cancel an S3 batch job. 
- - Args: - job_id (str): ID of the batch job - account_id (str): AWS account ID - """ - try: - self.s3control_client.update_job_status( - AccountId=account_id, - JobId=job_id, - RequestedJobStatus='Cancelled' - ) - print(f"Job {job_id} was successfully canceled.") - except ClientError as e: - print(f"Error canceling job: {e}") - raise - - # snippet-start:[python.example_code.s3control.describe_job] - def describe_job_details(self, job_id: str, account_id: str) -> None: - """ - Describe detailed information about a batch job. - - Args: - job_id (str): ID of the batch job - account_id (str): AWS account ID - """ - try: - response = self.s3control_client.describe_job( - AccountId=account_id, - JobId=job_id - ) - job = response['Job'] - print(f"Job ID: {job['JobId']}") - print(f"Description: {job.get('Description', 'N/A')}") - print(f"Status: {job['Status']}") - print(f"Role ARN: {job['RoleArn']}") - print(f"Priority: {job['Priority']}") - if 'ProgressSummary' in job: - progress = job['ProgressSummary'] - print(f"Progress Summary: Total={progress.get('TotalNumberOfTasks', 0)}, " - f"Succeeded={progress.get('NumberOfTasksSucceeded', 0)}, " - f"Failed={progress.get('NumberOfTasksFailed', 0)}") - except ClientError as e: - print(f"Error describing job: {e}") - raise - # snippet-end:[python.example_code.s3control.describe_job] - - # snippet-start:[python.example_code.s3control.get_job_tagging] - def get_job_tags(self, job_id: str, account_id: str) -> None: - """ - Get tags associated with a batch job. - - Args: - job_id (str): ID of the batch job - account_id (str): AWS account ID - """ - try: - response = self.s3control_client.get_job_tagging( - AccountId=account_id, - JobId=job_id - ) - tags = response.get('Tags', []) - if tags: - print(f"Tags for job {job_id}:") - for tag in tags: - print(f" {tag['Key']}: {tag['Value']}") - else: - print(f"No tags found for job ID: {job_id}") - except ClientError as e: - print(f"Error getting job tags: {e}") - raise - # snippet-end:[python.example_code.s3control.get_job_tagging] - - # snippet-start:[python.example_code.s3control.put_job_tagging] - def put_job_tags(self, job_id: str, account_id: str) -> None: - """ - Add tags to a batch job. - - Args: - job_id (str): ID of the batch job - account_id (str): AWS account ID - """ - try: - self.s3control_client.put_job_tagging( - AccountId=account_id, - JobId=job_id, - Tags=[ - {'Key': 'Environment', 'Value': 'Development'}, - {'Key': 'Team', 'Value': 'DataProcessing'} - ] - ) - print(f"Additional tags were added to job {job_id}") - except ClientError as e: - print(f"Error adding job tags: {e}") - raise - # snippet-end:[python.example_code.s3control.put_job_tagging] - - # snippet-start:[python.example_code.s3control.list_jobs] - def list_jobs(self, account_id: str) -> None: - """ - List all batch jobs for the account. - - Args: - account_id (str): AWS account ID - """ - try: - response = self.s3control_client.list_jobs( - AccountId=account_id, - JobStatuses=['Active', 'Complete', 'Cancelled', 'Failed', 'New', 'Paused', 'Pausing', 'Preparing', 'Ready', 'Suspended'] - ) - jobs = response.get('Jobs', []) - for job in jobs: - print(f"The job id is {job['JobId']}") - print(f"The job priority is {job['Priority']}") - except ClientError as e: - print(f"Error listing jobs: {e}") - raise - # snippet-end:[python.example_code.s3control.list_jobs] - - # snippet-start:[python.example_code.s3control.delete_job_tagging] - def delete_job_tags(self, job_id: str, account_id: str) -> None: - """ - Delete all tags from a batch job. 
- - Args: - job_id (str): ID of the batch job - account_id (str): AWS account ID - """ - try: - self.s3control_client.delete_job_tagging( - AccountId=account_id, - JobId=job_id - ) - print(f"You have successfully deleted {job_id} tagging.") - except ClientError as e: - print(f"Error deleting job tags: {e}") - raise - # snippet-end:[python.example_code.s3control.delete_job_tagging] - - def cleanup_resources(self, bucket_name: str, file_names: List[str]) -> None: - """ - Clean up all resources created during the scenario. - - Args: - bucket_name (str): Name of the bucket to clean up - file_names (list): List of files to delete - - Raises: - ClientError: If cleanup fails - """ - try: - for file_name in file_names: - self.s3_client.delete_object(Bucket=bucket_name, Key=file_name) - print(f"Deleted {file_name}") - - response = self.s3_client.list_objects_v2( - Bucket=bucket_name, - Prefix='batch-op-reports/' - ) - if 'Contents' in response: - for obj in response['Contents']: - self.s3_client.delete_object( - Bucket=bucket_name, - Key=obj['Key'] - ) - print(f"Deleted {obj['Key']}") - - self.s3_client.delete_bucket(Bucket=bucket_name) - print(f"Deleted bucket {bucket_name}") - except ClientError as e: - print(f"Error in cleanup: {e}") - raise -# snippet-end:[python.example_code.s3control.helper.S3BatchScenario] - - -def wait_for_input() -> None: - """ - Wait for user input to continue. - - Returns: - None - """ - q.ask("\nPress Enter to continue...") - print() - - -def setup_resources(scenario: S3BatchScenario, bucket_name: str, file_names: List[str]) -> Tuple[str, str]: - """ - Set up initial resources for the scenario. - - Args: - scenario: S3BatchScenario instance - bucket_name (str): Name of the bucket to create - file_names (list): List of files to upload - - Returns: - tuple: Manifest location and report bucket ARN - """ - print("\nSetting up required resources...") - scenario.create_bucket(bucket_name) - report_bucket_arn = f"arn:aws:s3:::{bucket_name}" - manifest_location = f"arn:aws:s3:::{bucket_name}/job-manifest.csv" - scenario.upload_files_to_bucket(bucket_name, file_names) - return manifest_location, report_bucket_arn - - -def main() -> None: - """Main function to run the S3 Batch Operations scenario.""" - region_name = 'us-west-2' - scenario = S3BatchScenario(region_name) - cfn_helper = CloudFormationHelper(region_name) - account_id = scenario.get_account_id() - # Generate a unique bucket name using UUID - bucket_name = f"demo-s3-batch-{str(uuid.uuid4())}" - # Define test files to be created and processed - file_names = [ - "job-manifest.csv", - "object-key-1.txt", - "object-key-2.txt", - "object-key-3.txt", - "object-key-4.txt" - ] - - print(scenario.DASHES) - print("Welcome to the Amazon S3 Batch basics scenario.") - print(""" - S3 Batch operations enables efficient and cost-effective processing of large-scale - data stored in Amazon S3. It automatically scales resources to handle varying workloads - without the need for manual intervention. - - This Python program walks you through Amazon S3 Batch operations. 
- """) - - try: - # Deploy CloudFormation stack for IAM roles - print("Deploying CloudFormation stack...") - cfn_helper.deploy_cloudformation_stack(scenario.STACK_NAME) - # Get the created IAM role ARN from stack outputs - stack_outputs = cfn_helper.get_stack_outputs(scenario.STACK_NAME) - iam_role_arn = stack_outputs.get('S3BatchRoleArn') - # Set up S3 bucket and upload test files - manifest_location, report_bucket_arn = setup_resources( - scenario, bucket_name, file_names - ) - - wait_for_input() - - print("\n1. Creating S3 Batch Job...") - job_id = scenario.create_s3_batch_job( - account_id, - iam_role_arn, - manifest_location, - report_bucket_arn - ) - - time.sleep(5) - failure_reasons = scenario.check_job_failure_reasons(job_id, account_id) - if failure_reasons: - print("\nJob failed. Please fix the issues and try again.") - if not q.ask( - "Do you want to proceed with the rest of the operations? (y/n): ", q.is_yesno - ): - raise ValueError("Job failed, stopping execution") - - wait_for_input() - print("\n" + scenario.DASHES) - print("2. Update an existing S3 Batch Operations job's priority") - print("In this step, we modify the job priority value. The higher the number, the higher the priority.") - scenario.update_job_priority(job_id, account_id) - - wait_for_input() - print("\n" + scenario.DASHES) - print("3. Cancel the S3 Batch job") - cancel_job = q.ask("Do you want to cancel the Batch job? (y/n): ", q.is_yesno) - if cancel_job: - scenario.cancel_job(job_id, account_id) - else: - print(f"Job {job_id} was not canceled.") - - wait_for_input() - print("\n" + scenario.DASHES) - print("4. Describe the job that was just created") - scenario.describe_job_details(job_id, account_id) - - wait_for_input() - print("\n" + scenario.DASHES) - print("5. Describe the tags associated with the job") - scenario.get_job_tags(job_id, account_id) - - wait_for_input() - print("\n" + scenario.DASHES) - print("6. Update Batch Job Tags") - scenario.put_job_tags(job_id, account_id) - - wait_for_input() - print("\n" + scenario.DASHES) - print("7. List Batch Jobs") - scenario.list_jobs(account_id) - - wait_for_input() - print("\n" + scenario.DASHES) - print("8. Delete the Amazon S3 Batch job tagging") - delete_tags = q.ask("Do you want to delete Batch job tagging? (y/n): ", q.is_yesno) - if delete_tags: - scenario.delete_job_tags(job_id, account_id) - - print("\n" + scenario.DASHES) - if q.ask( - "Do you want to delete the AWS resources used in this scenario? (y/n): ", q.is_yesno - ): - scenario.cleanup_resources(bucket_name, file_names) - cfn_helper.destroy_cloudformation_stack(scenario.STACK_NAME) - - except Exception as e: - print(f"An error occurred: {e}") - raise - - print("\nThe Amazon S3 Batch scenario has successfully completed.") - print(scenario.DASHES) - - -if __name__ == "__main__": - main() -# snippet-end:[python.example_code.s3control.Batch.scenario] diff --git a/python/example_code/s3/scenarios/batch/test/conftest.py b/python/example_code/s3/scenarios/batch/test/conftest.py new file mode 100644 index 00000000000..774e8801dc7 --- /dev/null +++ b/python/example_code/s3/scenarios/batch/test/conftest.py @@ -0,0 +1,36 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +"""Shared test fixtures for S3 batch operations tests.""" + +import boto3 +import pytest +from moto import mock_s3, mock_s3control, mock_sts + +from test_s3_batch_stubber import S3BatchStubber +from s3_batch_wrapper import S3BatchWrapper +from cloudformation_helper import CloudFormationHelper + + +class ScenarioData: + """Holds data for scenario tests.""" + + def __init__(self, wrapper, cfn_helper, stubber): + self.wrapper = wrapper + self.cfn_helper = cfn_helper + self.stubber = stubber + + +@pytest.fixture +def scenario_data(make_stubber): + """Create scenario data with stubbed clients.""" + s3_client = boto3.client("s3", region_name="us-east-1") + s3control_client = boto3.client("s3control", region_name="us-east-1") + sts_client = boto3.client("sts", region_name="us-east-1") + cfn_client = boto3.client("cloudformation", region_name="us-east-1") + + wrapper = S3BatchWrapper(s3_client, s3control_client, sts_client) + cfn_helper = CloudFormationHelper(cfn_client) + stubber = make_stubber(S3BatchStubber, s3_client, s3control_client, sts_client) + + return ScenarioData(wrapper, cfn_helper, stubber) \ No newline at end of file diff --git a/python/example_code/s3/scenarios/batch/test/test_s3_batch.py b/python/example_code/s3/scenarios/batch/test/test_s3_batch.py deleted file mode 100644 index 78d915865db..00000000000 --- a/python/example_code/s3/scenarios/batch/test/test_s3_batch.py +++ /dev/null @@ -1,352 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 - -"""Unit tests for S3 batch operations module.""" - -import json -import pytest -from unittest.mock import Mock, patch -from botocore.exceptions import ClientError - -from s3_batch import CloudFormationHelper, S3BatchScenario, setup_resources - - -class TestCloudFormationHelper: - """Test cases for CloudFormationHelper class.""" - - @pytest.fixture - def cfn_helper(self): - """Create CloudFormationHelper instance for testing.""" - return CloudFormationHelper('us-west-2') - - @patch('boto3.client') - def test_init(self, mock_boto3_client): - """Test CloudFormationHelper initialization.""" - CloudFormationHelper('us-east-1') - mock_boto3_client.assert_called_with('cloudformation', region_name='us-east-1') - - @patch('boto3.client') - def test_deploy_cloudformation_stack_success(self, mock_boto3_client, cfn_helper): - """Test successful CloudFormation stack deployment.""" - mock_client = Mock() - mock_boto3_client.return_value = mock_client - cfn_helper.cfn_client = mock_client - with patch.object(cfn_helper, '_wait_for_stack_completion'): - cfn_helper.deploy_cloudformation_stack('test-stack') - mock_client.create_stack.assert_called_once() - call_args = mock_client.create_stack.call_args - assert call_args[1]['StackName'] == 'test-stack' - assert 'CAPABILITY_IAM' in call_args[1]['Capabilities'] - - # Verify the template includes AmazonS3FullAccess policy - template_body = json.loads(call_args[1]['TemplateBody']) - assert 'ManagedPolicyArns' in template_body['Resources']['S3BatchRole']['Properties'] - assert 'arn:aws:iam::aws:policy/AmazonS3FullAccess' in template_body['Resources']['S3BatchRole']['Properties']['ManagedPolicyArns'] - - @patch('boto3.client') - def test_deploy_cloudformation_stack_failure(self, mock_boto3_client, cfn_helper): - """Test CloudFormation stack deployment failure.""" - mock_client = Mock() - mock_client.create_stack.side_effect = ClientError( - {'Error': {'Code': 'ValidationError', 'Message': 'Invalid 
template'}}, - 'CreateStack' - ) - mock_boto3_client.return_value = mock_client - cfn_helper.cfn_client = mock_client - with pytest.raises(ClientError): - cfn_helper.deploy_cloudformation_stack('test-stack') - - @patch('boto3.client') - def test_get_stack_outputs_success(self, mock_boto3_client, cfn_helper): - """Test successful retrieval of stack outputs.""" - mock_client = Mock() - mock_client.describe_stacks.return_value = { - 'Stacks': [{ - 'Outputs': [ - {'OutputKey': 'S3BatchRoleArn', 'OutputValue': 'arn:aws:iam::123456789012:role/test-role'} - ] - }] - } - mock_boto3_client.return_value = mock_client - cfn_helper.cfn_client = mock_client - - outputs = cfn_helper.get_stack_outputs('test-stack') - assert outputs['S3BatchRoleArn'] == 'arn:aws:iam::123456789012:role/test-role' - - @patch('boto3.client') - def test_destroy_cloudformation_stack_success(self, mock_boto3_client, cfn_helper): - """Test successful CloudFormation stack deletion.""" - mock_client = Mock() - mock_boto3_client.return_value = mock_client - cfn_helper.cfn_client = mock_client - - with patch.object(cfn_helper, '_wait_for_stack_completion'): - cfn_helper.destroy_cloudformation_stack('test-stack') - - mock_client.delete_stack.assert_called_once_with(StackName='test-stack') - - - -class TestS3BatchScenario: - """Test cases for S3BatchScenario class.""" - - @pytest.fixture - def s3_scenario(self): - """Create S3BatchScenario instance for testing.""" - return S3BatchScenario('us-west-2') - - @patch('boto3.client') - def test_init(self, mock_boto3_client): - """Test S3BatchScenario initialization.""" - scenario = S3BatchScenario('us-east-1') - assert mock_boto3_client.call_count == 3 - assert scenario.region_name == 'us-east-1' - - @patch('boto3.client') - def test_get_account_id(self, mock_boto3_client, s3_scenario): - """Test getting AWS account ID.""" - mock_sts_client = Mock() - mock_sts_client.get_caller_identity.return_value = {'Account': '123456789012'} - s3_scenario.sts_client = mock_sts_client - - account_id = s3_scenario.get_account_id() - assert account_id == '123456789012' - - @patch('boto3.client') - def test_create_bucket_us_west_2(self, mock_boto3_client, s3_scenario): - """Test bucket creation in us-west-2.""" - mock_s3_client = Mock() - s3_scenario.s3_client = mock_s3_client - - s3_scenario.create_bucket('test-bucket') - - mock_s3_client.create_bucket.assert_called_once_with( - Bucket='test-bucket', - CreateBucketConfiguration={'LocationConstraint': 'us-west-2'} - ) - - @patch('boto3.client') - def test_create_bucket_us_east_1(self, mock_boto3_client): - """Test bucket creation in us-east-1.""" - scenario = S3BatchScenario('us-east-1') - mock_s3_client = Mock() - scenario.s3_client = mock_s3_client - - scenario.create_bucket('test-bucket') - - mock_s3_client.create_bucket.assert_called_once_with(Bucket='test-bucket') - - @patch('boto3.client') - def test_upload_files_to_bucket(self, mock_boto3_client, s3_scenario): - """Test uploading files to S3 bucket.""" - mock_s3_client = Mock() - mock_s3_client.put_object.return_value = {'ETag': '"test-etag"'} - s3_scenario.s3_client = mock_s3_client - - file_names = ['job-manifest.csv', 'test-file.txt'] - etag = s3_scenario.upload_files_to_bucket('test-bucket', file_names) - - assert etag == 'test-etag' - assert mock_s3_client.put_object.call_count == 2 - - @patch('boto3.client') - def test_create_s3_batch_job_success(self, mock_boto3_client, s3_scenario): - """Test successful S3 batch job creation.""" - mock_s3_client = Mock() - 
mock_s3_client.head_object.return_value = {'ETag': '"test-etag"'} - mock_s3control_client = Mock() - mock_s3control_client.create_job.return_value = {'JobId': 'test-job-id'} - - s3_scenario.s3_client = mock_s3_client - s3_scenario.s3control_client = mock_s3control_client - - job_id = s3_scenario.create_s3_batch_job( - '123456789012', - 'arn:aws:iam::123456789012:role/test-role', - 'arn:aws:s3:::test-bucket/job-manifest.csv', - 'arn:aws:s3:::test-bucket' - ) - - assert job_id == 'test-job-id' - mock_s3control_client.create_job.assert_called_once() - - # Verify ConfirmationRequired is set to False - call_args = mock_s3control_client.create_job.call_args - assert call_args[1]['ConfirmationRequired'] is False - - @patch('boto3.client') - def test_check_job_failure_reasons(self, mock_boto3_client, s3_scenario): - """Test checking job failure reasons.""" - mock_s3control_client = Mock() - mock_s3control_client.describe_job.return_value = { - 'Job': { - 'FailureReasons': ['Reason 1', 'Reason 2'] - } - } - s3_scenario.s3control_client = mock_s3control_client - - reasons = s3_scenario.check_job_failure_reasons('test-job-id', '123456789012') - - assert reasons == ['Reason 1', 'Reason 2'] - - @patch('boto3.client') - @patch('time.sleep') - def test_wait_for_job_ready_success(self, mock_sleep, mock_boto3_client, s3_scenario): - """Test waiting for job to become ready.""" - mock_s3control_client = Mock() - mock_s3control_client.describe_job.return_value = { - 'Job': {'Status': 'Ready'} - } - s3_scenario.s3control_client = mock_s3control_client - - result = s3_scenario.wait_for_job_ready('test-job-id', '123456789012') - - assert result is True - - @patch('boto3.client') - @patch('time.sleep') - def test_wait_for_job_ready_suspended(self, mock_sleep, mock_boto3_client, s3_scenario): - """Test waiting for job with Suspended status.""" - mock_s3control_client = Mock() - mock_s3control_client.describe_job.return_value = { - 'Job': {'Status': 'Suspended'} - } - s3_scenario.s3control_client = mock_s3control_client - - result = s3_scenario.wait_for_job_ready('test-job-id', '123456789012') - - assert result is True - - @patch('boto3.client') - def test_update_job_priority_success(self, mock_boto3_client, s3_scenario): - """Test successful job priority update.""" - mock_s3control_client = Mock() - mock_s3control_client.describe_job.return_value = { - 'Job': {'Status': 'Suspended'} - } - s3_scenario.s3control_client = mock_s3control_client - - s3_scenario.update_job_priority('test-job-id', '123456789012') - - mock_s3control_client.update_job_priority.assert_called_once() - mock_s3control_client.update_job_status.assert_called_once() - - @patch('boto3.client') - def test_update_job_priority_with_ready_status(self, mock_boto3_client, s3_scenario): - """Test job priority update with Ready status.""" - mock_s3control_client = Mock() - mock_s3control_client.describe_job.return_value = { - 'Job': {'Status': 'Ready'} - } - s3_scenario.s3control_client = mock_s3control_client - - s3_scenario.update_job_priority('test-job-id', '123456789012') - - mock_s3control_client.update_job_priority.assert_called_once() - mock_s3control_client.update_job_status.assert_called_once() - - @patch('boto3.client') - def test_update_job_priority_error_handling(self, mock_boto3_client, s3_scenario): - """Test error handling in job priority update.""" - mock_s3control_client = Mock() - mock_s3control_client.describe_job.return_value = { - 'Job': {'Status': 'Suspended'} - } - mock_s3control_client.update_job_priority.side_effect = ClientError( 
- {'Error': {'Code': 'InvalidRequest', 'Message': 'Cannot update priority'}}, - 'UpdateJobPriority' - ) - mock_s3control_client.update_job_status = Mock() - s3_scenario.s3control_client = mock_s3control_client - - # Should not raise exception due to error handling - s3_scenario.update_job_priority('test-job-id', '123456789012') - - # Should still try to activate the job even if priority update fails - mock_s3control_client.update_job_status.assert_called_once() - - @patch('boto3.client') - def test_cleanup_resources(self, mock_boto3_client, s3_scenario): - """Test resource cleanup.""" - mock_s3_client = Mock() - mock_s3_client.list_objects_v2.return_value = { - 'Contents': [{'Key': 'batch-op-reports/report1.csv'}] - } - s3_scenario.s3_client = mock_s3_client - - file_names = ['test-file.txt'] - s3_scenario.cleanup_resources('test-bucket', file_names) - - assert mock_s3_client.delete_object.call_count == 2 # file + report - mock_s3_client.delete_bucket.assert_called_once_with(Bucket='test-bucket') - - -class TestUtilityFunctions: - """Test cases for utility functions.""" - - @patch('s3_batch.input', return_value='c') - def test_wait_for_input_valid(self, mock_input): - """Test wait_for_input with valid input.""" - # pylint: disable=import-outside-toplevel - from s3_batch import wait_for_input - wait_for_input() # Should not raise exception - - @patch('s3_batch.input', side_effect=['invalid', 'c']) - def test_wait_for_input_invalid_then_valid(self, mock_input): - """Test wait_for_input with invalid then valid input.""" - # pylint: disable=import-outside-toplevel - from s3_batch import wait_for_input - wait_for_input() # Should not raise exception - - def test_setup_resources(self): - """Test setup_resources function.""" - mock_scenario = Mock() - - manifest_location, report_bucket_arn = setup_resources( - mock_scenario, 'test-bucket', ['file1.txt', 'file2.txt'] - ) - - assert manifest_location == 'arn:aws:s3:::test-bucket/job-manifest.csv' - assert report_bucket_arn == 'arn:aws:s3:::test-bucket' - mock_scenario.create_bucket.assert_called_once_with('test-bucket') - mock_scenario.upload_files_to_bucket.assert_called_once() - - -class TestErrorHandling: - """Test cases for error handling scenarios.""" - - @pytest.fixture - def s3_scenario(self): - """Create S3BatchScenario instance for testing.""" - return S3BatchScenario('us-west-2') - - @patch('boto3.client') - def test_create_bucket_client_error(self, mock_boto3_client, s3_scenario): - """Test bucket creation with ClientError.""" - mock_s3_client = Mock() - mock_s3_client.create_bucket.side_effect = ClientError( - {'Error': {'Code': 'BucketAlreadyExists', 'Message': 'Bucket exists'}}, - 'CreateBucket' - ) - s3_scenario.s3_client = mock_s3_client - - with pytest.raises(ClientError): - s3_scenario.create_bucket('test-bucket') - - @patch('boto3.client') - def test_create_s3_batch_job_client_error(self, mock_boto3_client, s3_scenario): - """Test S3 batch job creation with ClientError.""" - mock_s3_client = Mock() - mock_s3_client.head_object.side_effect = ClientError( - {'Error': {'Code': 'NoSuchKey', 'Message': 'Key not found'}}, - 'HeadObject' - ) - s3_scenario.s3_client = mock_s3_client - - with pytest.raises(ClientError): - s3_scenario.create_s3_batch_job( - '123456789012', - 'arn:aws:iam::123456789012:role/test-role', - 'arn:aws:s3:::test-bucket/job-manifest.csv', - 'arn:aws:s3:::test-bucket' - ) \ No newline at end of file diff --git a/python/example_code/s3/scenarios/batch/test/test_s3_batch_stubbed.py 
b/python/example_code/s3/scenarios/batch/test/test_s3_batch_stubbed.py
new file mode 100644
index 00000000000..46b2b005a2a
--- /dev/null
+++ b/python/example_code/s3/scenarios/batch/test/test_s3_batch_stubbed.py
@@ -0,0 +1,218 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+"""Unit tests for S3 batch operations using service method stubbing patterns."""
+
+from botocore.exceptions import ClientError
+import pytest
+
+
+class MockManager:
+    def __init__(self, stub_runner, scenario_data, input_mocker):
+        self.scenario_data = scenario_data
+        self.stub_runner = stub_runner
+        self.account_id = "123456789012"
+        self.bucket_name = "test-batch-bucket"
+        self.job_id = "test-job-123"
+        self.role_arn = "arn:aws:iam::123456789012:role/S3BatchRole"
+        self.manifest_location = f"arn:aws:s3:::{self.bucket_name}/job-manifest.csv"
+        self.etag = "test-etag-123"
+        self.file_names = ["job-manifest.csv", "object-key-1.txt", "object-key-2.txt"]
+
+        # Mock user inputs
+        answers = ["y", "n", "y"]  # yes to proceed, no to cancel, yes to cleanup
+        input_mocker.mock_answers(answers)
+
+    def setup_stubs(self, error, stop_on, stubber):
+        with self.stub_runner(error, stop_on) as runner:
+            runner.add(stubber.stub_get_caller_identity, self.account_id)
+            runner.add(stubber.stub_create_bucket, self.bucket_name)
+            runner.add(stubber.stub_put_object, self.bucket_name, "object-key-1.txt")
+            runner.add(stubber.stub_put_object, self.bucket_name, "object-key-2.txt")
+            runner.add(stubber.stub_put_object, self.bucket_name, "job-manifest.csv", etag=self.etag)
+            runner.add(stubber.stub_head_object, self.bucket_name, "job-manifest.csv", etag=self.etag)
+            runner.add(stubber.stub_create_job, self.account_id, self.job_id)
+            runner.add(stubber.stub_describe_job, self.account_id, self.job_id, status="Suspended")
+            runner.add(stubber.stub_describe_job, self.account_id, self.job_id, status="Suspended")
+            runner.add(stubber.stub_update_job_priority, self.account_id, self.job_id)
+            runner.add(stubber.stub_update_job_status, self.account_id, self.job_id, "Ready")
+            runner.add(stubber.stub_describe_job, self.account_id, self.job_id, status="Ready")
+            runner.add(stubber.stub_get_job_tagging, self.account_id, self.job_id, tags=[])
+            runner.add(stubber.stub_put_job_tagging, self.account_id, self.job_id)
+            runner.add(stubber.stub_list_jobs, self.account_id, [{"JobId": self.job_id, "Priority": 60}])
+            runner.add(stubber.stub_delete_job_tagging, self.account_id, self.job_id)
+
+    def setup_cleanup_stubs(self, stubber):
+        with self.stub_runner(None, None) as runner:
+            for file_name in self.file_names:
+                runner.add(stubber.stub_delete_object, self.bucket_name, file_name)
+            runner.add(stubber.stub_list_objects_v2, self.bucket_name, prefix="batch-op-reports/", contents=[])
+            runner.add(stubber.stub_delete_bucket, self.bucket_name)
+
+
+@pytest.fixture
+def mock_mgr(stub_runner, scenario_data, input_mocker):
+    return MockManager(stub_runner, scenario_data, input_mocker)
+
+
+def test_get_account_id(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 0, mock_mgr.scenario_data.stubber)
+
+    account_id = mock_mgr.scenario_data.wrapper.get_account_id()
+
+    assert account_id == mock_mgr.account_id
+
+
+def test_create_bucket(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 1, mock_mgr.scenario_data.stubber)
+
+    mock_mgr.scenario_data.wrapper.create_bucket(mock_mgr.bucket_name)
+
+    capt = capsys.readouterr()
+    assert f"Created bucket: {mock_mgr.bucket_name}" in capt.out
+
+
+def test_upload_files_to_bucket(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 4, mock_mgr.scenario_data.stubber)
+
+    etag = mock_mgr.scenario_data.wrapper.upload_files_to_bucket(
+        mock_mgr.bucket_name, mock_mgr.file_names
+    )
+
+    assert etag == mock_mgr.etag
+    capt = capsys.readouterr()
+    assert "Uploaded manifest file" in capt.out
+
+
+def test_create_s3_batch_job(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 6, mock_mgr.scenario_data.stubber)
+
+    job_id = mock_mgr.scenario_data.wrapper.create_s3_batch_job(
+        mock_mgr.account_id,
+        mock_mgr.role_arn,
+        mock_mgr.manifest_location,
+        f"arn:aws:s3:::{mock_mgr.bucket_name}"
+    )
+
+    assert job_id == mock_mgr.job_id
+    capt = capsys.readouterr()
+    assert f"The Job id is {mock_mgr.job_id}" in capt.out
+
+
+def test_check_job_failure_reasons(mock_mgr):
+    mock_mgr.setup_stubs(None, 7, mock_mgr.scenario_data.stubber)
+
+    reasons = mock_mgr.scenario_data.wrapper.check_job_failure_reasons(
+        mock_mgr.job_id, mock_mgr.account_id
+    )
+
+    assert reasons == []
+
+
+def test_update_job_priority(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 10, mock_mgr.scenario_data.stubber)
+
+    mock_mgr.scenario_data.wrapper.update_job_priority(
+        mock_mgr.job_id, mock_mgr.account_id
+    )
+
+    capt = capsys.readouterr()
+    assert "The job priority was updated" in capt.out
+
+
+def test_describe_job_details(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 11, mock_mgr.scenario_data.stubber)
+
+    mock_mgr.scenario_data.wrapper.describe_job_details(
+        mock_mgr.job_id, mock_mgr.account_id
+    )
+
+    capt = capsys.readouterr()
+    assert f"Job ID: {mock_mgr.job_id}" in capt.out
+
+
+def test_get_job_tags(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 12, mock_mgr.scenario_data.stubber)
+
+    mock_mgr.scenario_data.wrapper.get_job_tags(
+        mock_mgr.job_id, mock_mgr.account_id
+    )
+
+    capt = capsys.readouterr()
+    assert f"No tags found for job ID: {mock_mgr.job_id}" in capt.out
+
+
+def test_put_job_tags(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 13, mock_mgr.scenario_data.stubber)
+
+    mock_mgr.scenario_data.wrapper.put_job_tags(
+        mock_mgr.job_id, mock_mgr.account_id
+    )
+
+    capt = capsys.readouterr()
+    assert f"Additional tags were added to job {mock_mgr.job_id}" in capt.out
+
+
+def test_list_jobs(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 14, mock_mgr.scenario_data.stubber)
+
+    mock_mgr.scenario_data.wrapper.list_jobs(mock_mgr.account_id)
+
+    capt = capsys.readouterr()
+    assert f"The job id is {mock_mgr.job_id}" in capt.out
+    assert "The job priority is 60" in capt.out
+
+
+def test_delete_job_tags(mock_mgr, capsys):
+    mock_mgr.setup_stubs(None, 15, mock_mgr.scenario_data.stubber)
+
+    mock_mgr.scenario_data.wrapper.delete_job_tags(
+        mock_mgr.job_id, mock_mgr.account_id
+    )
+
+    capt = capsys.readouterr()
+    assert f"You have successfully deleted {mock_mgr.job_id} tagging." in capt.out
+
+
+def test_cleanup_resources(mock_mgr, capsys):
+    mock_mgr.setup_cleanup_stubs(mock_mgr.scenario_data.stubber)
+
+    mock_mgr.scenario_data.wrapper.cleanup_resources(
+        mock_mgr.bucket_name, mock_mgr.file_names
+    )
+
+    capt = capsys.readouterr()
+    assert f"Deleted bucket {mock_mgr.bucket_name}" in capt.out
+
+
+@pytest.mark.parametrize(
+    "error, stop_on_index",
+    [
+        ("TESTERROR-stub_get_caller_identity", 0),
+        ("TESTERROR-stub_create_bucket", 1),
+        ("TESTERROR-stub_create_job", 6),
+        ("TESTERROR-stub_update_job_priority", 9),
+    ],
+)
+def test_wrapper_errors(mock_mgr, caplog, error, stop_on_index):
+    mock_mgr.setup_stubs(error, stop_on_index, mock_mgr.scenario_data.stubber)
+
+    with pytest.raises(ClientError) as exc_info:
+        if "get_caller_identity" in error:
+            mock_mgr.scenario_data.wrapper.get_account_id()
+        elif "create_bucket" in error:
+            mock_mgr.scenario_data.wrapper.create_bucket(mock_mgr.bucket_name)
+        elif "create_job" in error:
+            mock_mgr.scenario_data.wrapper.create_s3_batch_job(
+                mock_mgr.account_id,
+                mock_mgr.role_arn,
+                mock_mgr.manifest_location,
+                f"arn:aws:s3:::{mock_mgr.bucket_name}"
+            )
+        elif "update_job_priority" in error:
+            mock_mgr.scenario_data.wrapper.update_job_priority(
+                mock_mgr.job_id, mock_mgr.account_id
+            )
+
+    assert exc_info.value.response["Error"]["Code"] == error
+    assert error in caplog.text
\ No newline at end of file
diff --git a/python/example_code/s3/scenarios/batch/test/test_s3_batch_stubber.py b/python/example_code/s3/scenarios/batch/test/test_s3_batch_stubber.py
new file mode 100644
index 00000000000..dc334666ce0
--- /dev/null
+++ b/python/example_code/s3/scenarios/batch/test/test_s3_batch_stubber.py
@@ -0,0 +1,190 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+"""Stubber functions for S3 batch operations tests."""
+
+from botocore.stub import ANY, Stubber
+
+
+class S3BatchStubber:
+    """Stubber for S3 Batch Operations service methods."""
+
+    def __init__(self, s3_client, s3control_client, sts_client):
+        """Initialize stubbers for all clients."""
+        self.s3_stubber = Stubber(s3_client)
+        self.s3control_stubber = Stubber(s3control_client)
+        self.sts_stubber = Stubber(sts_client)
+
+    def stub_get_caller_identity(self, account_id, error_code=None):
+        """Stub STS get_caller_identity method."""
+        expected_params = {}
+        if error_code is None:
+            response = {"Account": account_id}
+            self.sts_stubber.add_response("get_caller_identity", response, expected_params)
+        else:
+            self.sts_stubber.add_client_error("get_caller_identity", error_code, expected_params=expected_params)
+
+    def stub_create_bucket(self, bucket_name, region=None, error_code=None):
+        """Stub S3 create_bucket method."""
+        expected_params = {"Bucket": bucket_name}
+        if region and region != "us-east-1":
+            expected_params["CreateBucketConfiguration"] = {"LocationConstraint": region}
+
+        if error_code is None:
+            response = {}
+            self.s3_stubber.add_response("create_bucket", response, expected_params)
+        else:
+            self.s3_stubber.add_client_error("create_bucket", error_code, expected_params=expected_params)
+
+    def stub_put_object(self, bucket_name, key, etag="test-etag", error_code=None):
+        """Stub S3 put_object method."""
+        expected_params = {"Bucket": bucket_name, "Key": key, "Body": ANY}
+
+        if error_code is None:
+            response = {"ETag": f'"{etag}"'}
+            self.s3_stubber.add_response("put_object", response, expected_params)
+        else:
+            self.s3_stubber.add_client_error("put_object", error_code, expected_params=expected_params)
+
+    def stub_head_object(self, bucket_name, key, etag="test-etag", error_code=None):
+        """Stub S3 head_object method."""
+        expected_params = {"Bucket": bucket_name, "Key": key}
+
+        if error_code is None:
+            response = {"ETag": f'"{etag}"'}
+            self.s3_stubber.add_response("head_object", response, expected_params)
+        else:
+            self.s3_stubber.add_client_error("head_object", error_code, expected_params=expected_params)
+
+    def stub_create_job(self, account_id, job_id, error_code=None):
+        """Stub S3Control create_job method."""
+        expected_params = {
+            "AccountId": account_id,
+            "Operation": ANY,
+            "Report": ANY,
+            "Manifest": ANY,
+            "Priority": ANY,
+            "RoleArn": ANY,
+            "Description": ANY,
+            "ConfirmationRequired": ANY
+        }
+
+        if error_code is None:
+            response = {"JobId": job_id}
+            self.s3control_stubber.add_response("create_job", response, expected_params)
+        else:
+            self.s3control_stubber.add_client_error("create_job", error_code, expected_params=expected_params)
+
+    def stub_describe_job(self, account_id, job_id, status="Ready", failure_reasons=None, error_code=None):
+        """Stub S3Control describe_job method."""
+        expected_params = {"AccountId": account_id, "JobId": job_id}
+
+        if error_code is None:
+            job_data = {
+                "JobId": job_id,
+                "Status": status,
+                "Priority": 10,
+                "RoleArn": "arn:aws:iam::123456789012:role/S3BatchRole",
+                "Description": "Batch job for tagging objects"
+            }
+            if failure_reasons:
+                job_data["FailureReasons"] = failure_reasons
+
+            response = {"Job": job_data}
+            self.s3control_stubber.add_response("describe_job", response, expected_params)
+        else:
+            self.s3control_stubber.add_client_error("describe_job", error_code, expected_params=expected_params)
+
+    def stub_update_job_priority(self, account_id, job_id, priority=60, error_code=None):
+        """Stub S3Control update_job_priority method."""
+        expected_params = {"AccountId": account_id, "JobId": job_id, "Priority": priority}
+
+        if error_code is None:
+            response = {}
+            self.s3control_stubber.add_response("update_job_priority", response, expected_params)
+        else:
+            self.s3control_stubber.add_client_error("update_job_priority", error_code, expected_params=expected_params)
+
+    def stub_update_job_status(self, account_id, job_id, status, error_code=None):
+        """Stub S3Control update_job_status method."""
+        expected_params = {"AccountId": account_id, "JobId": job_id, "RequestedJobStatus": status}
+
+        if error_code is None:
+            response = {}
+            self.s3control_stubber.add_response("update_job_status", response, expected_params)
+        else:
+            self.s3control_stubber.add_client_error("update_job_status", error_code, expected_params=expected_params)
+
+    def stub_get_job_tagging(self, account_id, job_id, tags=None, error_code=None):
+        """Stub S3Control get_job_tagging method."""
+        expected_params = {"AccountId": account_id, "JobId": job_id}
+
+        if error_code is None:
+            response = {"Tags": tags or []}
+            self.s3control_stubber.add_response("get_job_tagging", response, expected_params)
+        else:
+            self.s3control_stubber.add_client_error("get_job_tagging", error_code, expected_params=expected_params)
+
+    def stub_put_job_tagging(self, account_id, job_id, error_code=None):
+        """Stub S3Control put_job_tagging method."""
+        expected_params = {"AccountId": account_id, "JobId": job_id, "Tags": ANY}
+
+        if error_code is None:
+            response = {}
+            self.s3control_stubber.add_response("put_job_tagging", response, expected_params)
+        else:
+            self.s3control_stubber.add_client_error("put_job_tagging", error_code, expected_params=expected_params)
+
+    def stub_list_jobs(self, account_id, jobs=None, error_code=None):
+        """Stub S3Control list_jobs method."""
+        expected_params = {"AccountId": account_id, "JobStatuses": ANY}
+
+        if error_code is None:
+            response = {"Jobs": jobs or []}
+            self.s3control_stubber.add_response("list_jobs", response, expected_params)
+        else:
+            self.s3control_stubber.add_client_error("list_jobs", error_code, expected_params=expected_params)
+
+    def stub_delete_job_tagging(self, account_id, job_id, error_code=None):
+        """Stub S3Control delete_job_tagging method."""
+        expected_params = {"AccountId": account_id, "JobId": job_id}
+
+        if error_code is None:
+            response = {}
+            self.s3control_stubber.add_response("delete_job_tagging", response, expected_params)
+        else:
+            self.s3control_stubber.add_client_error("delete_job_tagging", error_code, expected_params=expected_params)
+
+    def stub_delete_object(self, bucket_name, key, error_code=None):
+        """Stub S3 delete_object method."""
+        expected_params = {"Bucket": bucket_name, "Key": key}
+
+        if error_code is None:
+            response = {}
+            self.s3_stubber.add_response("delete_object", response, expected_params)
+        else:
+            self.s3_stubber.add_client_error("delete_object", error_code, expected_params=expected_params)
+
+    def stub_list_objects_v2(self, bucket_name, prefix=None, contents=None, error_code=None):
+        """Stub S3 list_objects_v2 method."""
+        expected_params = {"Bucket": bucket_name}
+        if prefix:
+            expected_params["Prefix"] = prefix
+
+        if error_code is None:
+            response = {}
+            if contents:
+                response["Contents"] = contents
+            self.s3_stubber.add_response("list_objects_v2", response, expected_params)
+        else:
+            self.s3_stubber.add_client_error("list_objects_v2", error_code, expected_params=expected_params)
+
+    def stub_delete_bucket(self, bucket_name, error_code=None):
+        """Stub S3 delete_bucket method."""
+        expected_params = {"Bucket": bucket_name}
+
+        if error_code is None:
+            response = {}
+            self.s3_stubber.add_response("delete_bucket", response, expected_params)
+        else:
+            self.s3_stubber.add_client_error("delete_bucket", error_code, expected_params=expected_params)
\ No newline at end of file
From adadba4694395f0561378b79685f249d82b2a1a6 Mon Sep 17 00:00:00 2001
From: John Lwin
Date: Wed, 13 Aug 2025 03:15:58 -0700
Subject: [PATCH 14/16] chore: fix snippet tags

---
 python/example_code/s3/scenarios/batch/s3_batch_wrapper.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py
index c055801fdbc..686f62daca8 100644
--- a/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py
+++ b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py
@@ -379,7 +379,7 @@ def get_job_tags(self, job_id: str, account_id: str) -> None:
         except ClientError as e:
             print(f"Error getting job tags: {e}")
             raise
-    # snippet-start:[python.example_code.s3control.get_job_tagging]
+    # snippet-end:[python.example_code.s3control.get_job_tagging]
 
     # snippet-start:[python.example_code.s3control.put_job_tagging]
     def put_job_tags(self, job_id: str, account_id: str) -> None:
From 79a195446339c61c1de9f4e6700a95fb15d8ef2f Mon Sep 17 00:00:00 2001
From: John Lwin
Date: Thu, 11 Sep 2025 21:58:09 -0700
Subject: [PATCH 15/16] chore: address metadata and README items

---
 .doc_gen/metadata/s3-control_metadata.yaml                 | 2 +-
 python/example_code/s3/scenarios/batch/README.md           | 4 ++--
 python/example_code/s3/scenarios/batch/s3_batch_wrapper.py | 2 ++
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/.doc_gen/metadata/s3-control_metadata.yaml b/.doc_gen/metadata/s3-control_metadata.yaml
index d7ffd4f54ed..cde11341416 100644
--- a/.doc_gen/metadata/s3-control_metadata.yaml
+++ b/.doc_gen/metadata/s3-control_metadata.yaml
@@ -212,7 +212,7 @@ s3-control_Basics:
           excerpts:
             - description: Learn S3 Batch Basics Scenario.
               snippet_tags:
-                - python.example_code.s3control.Batch.scenario
+                - python.example_code.s3control.helper.S3BatchScenario
   services:
     s3-control: {
diff --git a/python/example_code/s3/scenarios/batch/README.md b/python/example_code/s3/scenarios/batch/README.md
index 9500bcf0049..671714f5cd4 100644
--- a/python/example_code/s3/scenarios/batch/README.md
+++ b/python/example_code/s3/scenarios/batch/README.md
@@ -40,12 +40,12 @@ To run these examples, you need:
 To run this workflow, pull AWS tokens and run the command below:
 
 ```bash
-python s3_batch.py
+python s3_batch_scenario.py
 ```
 
 ## Additional resources
 
-- [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html)
+- [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/userguide/batch-ops-create-job.html)
 - [Amazon S3 API Reference](https://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html)
 - [boto3 Amazon S3 reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html)
diff --git a/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py
index 686f62daca8..00ea6d87c1b 100644
--- a/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py
+++ b/python/example_code/s3/scenarios/batch/s3_batch_wrapper.py
@@ -306,6 +306,7 @@ def update_job_priority(self, job_id: str, account_id: str) -> None:
         return
     # snippet-end:[python.example_code.s3control.update_job_priority]
 
+    # snippet-start:[python.example_code.s3control.update_job_status]
     def cancel_job(self, job_id: str, account_id: str) -> None:
         """
         Cancel an S3 batch job.
@@ -324,6 +325,7 @@ def cancel_job(self, job_id: str, account_id: str) -> None:
         except ClientError as e:
             print(f"Error canceling job: {e}")
             raise
+    # snippet-end:[python.example_code.s3control.update_job_status]
 
     # snippet-start:[python.example_code.s3control.describe_job]
     def describe_job_details(self, job_id: str, account_id: str) -> None:
From c8a56051ea43e5a89c7ebae25ee4663ced2af64f Mon Sep 17 00:00:00 2001
From: John Lwin
Date: Thu, 11 Sep 2025 23:32:02 -0700
Subject: [PATCH 16/16] chore: clean up resources when the scenario fails

---
 python/example_code/s3/scenarios/batch/s3_batch_scenario.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/python/example_code/s3/scenarios/batch/s3_batch_scenario.py b/python/example_code/s3/scenarios/batch/s3_batch_scenario.py
index 70af2554428..3c15dbc1e3c 100644
--- a/python/example_code/s3/scenarios/batch/s3_batch_scenario.py
+++ b/python/example_code/s3/scenarios/batch/s3_batch_scenario.py
@@ -176,6 +176,12 @@ def run_scenario(self) -> None:
 
         except Exception as e:
             print(f"An error occurred: {e}")
+            print("\nCleaning up resources due to failure...")
+            try:
+                self.s3_batch_wrapper.cleanup_resources(bucket_name, file_names)
+                self.cfn_helper.destroy_cloudformation_stack(self.STACK_NAME)
+            except Exception as cleanup_error:
+                print(f"Error during cleanup: {cleanup_error}")
             raise
 
         print("\nThe Amazon S3 Batch scenario has successfully completed.")
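
A note on the stubber pattern used in the tests above: botocore exposes its wildcard
parameter matcher at module level, which is why the stubber imports `ANY` alongside
`Stubber`. The following is a minimal, self-contained sketch of that pattern; the
bucket name, key, and ETag here are illustrative values, not taken from the patch.

```python
# Minimal sketch of botocore response stubbing with the module-level ANY matcher.
import boto3
from botocore.stub import ANY, Stubber

s3 = boto3.client("s3", region_name="us-east-1")
stubber = Stubber(s3)

# Register the canned response; ANY accepts whatever Body the caller passes.
stubber.add_response(
    "put_object",
    {"ETag": '"example-etag"'},
    {"Bucket": "example-bucket", "Key": "example-key", "Body": ANY},
)

# While the stubber is active, the client returns the canned response
# instead of making a network call.
with stubber:
    response = s3.put_object(Bucket="example-bucket", Key="example-key", Body=b"data")
    assert response["ETag"] == '"example-etag"'
```

Assuming the shared doc-examples pytest fixtures (`make_stubber`, `stub_runner`, and
`input_mocker`) referenced by `conftest.py` are on the test path, the suite would
typically be run with `python -m pytest test/ -v` from the scenario directory.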