S3 syncs

parent d2d26bcd
......@@ -15,6 +15,8 @@ At the moment it only does three things; blue/green deploys for plugging into Gi
- [RDS Snapshots](#rds-snapshots)
- [RDS](#rds)
- [Copy](#copy)
- [Disaster Recovery](#disaster-recovery)
- [Transfer](#transfer)
- [Container](#container)
- [Billing](#billing)
- [Contributing](#contributing)
......@@ -237,6 +239,161 @@ Copy encrypted RDS instances between accounts:
`--region` is optional because it will default to the environment variable `AWS_DEFAULT_REGION`.
## Disaster Recovery
Akinaka has limited functionality for backing up and restoring data for use in disaster recovery.
### Transfer
Transfer data from S3, RDS, and RDS Aurora into a backup account:
akinaka dr \
--region eu-west-1 \
--source-role-arn arn:aws:iam::[LIVE_ACCOUNT_ID]:role/[ROLE_NAME] \
--destination-role-arn arn:aws:iam::[BACKUP_ACCOUNT_ID]:role/[ROLE_NAME] \
transfer \
--service s3
Omitting "--service" will include all supported services.
This requires that Akinaka is run with credentials (an account or instance profile) which can use `sts:AssumeRole` to assume both the `source-role-arn` and the `destination-role-arn`. This is true even if you are running in the account that `destination-role-arn` belongs to.
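For reference, this is one plain `sts:AssumeRole` call per role. Below is a minimal boto3 sketch (not Akinaka's own code; the role ARNs are placeholders) for checking that both roles are assumable from your current credentials:

```python
import boto3

# Placeholder ARNs: substitute the roles you pass to --source-role-arn and
# --destination-role-arn.
role_arns = [
    "arn:aws:iam::111111111111:role/live-dr-role",
    "arn:aws:iam::222222222222:role/backup-dr-role",
]

sts = boto3.client("sts", region_name="eu-west-1")

for role_arn in role_arns:
    # One sts:AssumeRole call per role, as Akinaka does internally.
    credentials = sts.assume_role(
        RoleArn=role_arn,
        RoleSessionName="akinaka-dr-check",
        DurationSeconds=900,
    )["Credentials"]
    print("Assumed {} (key id {})".format(role_arn, credentials["AccessKeyId"]))
```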
The following policy is needed to use this subcommand:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "KMSEncrypt",
"Effect": "Allow",
"Action": [
"kms:GetPublicKey",
"kms:ImportKeyMaterial",
"kms:Decrypt",
"kms:UntagResource",
"kms:PutKeyPolicy",
"kms:GenerateDataKeyWithoutPlaintext",
"kms:Verify",
"kms:ListResourceTags",
"kms:GenerateDataKeyPair",
"kms:GetParametersForImport",
"kms:TagResource",
"kms:Encrypt",
"kms:GetKeyRotationStatus",
"kms:ReEncryptTo",
"kms:DescribeKey",
"kms:Sign",
"kms:CreateGrant",
"kms:ListKeyPolicies",
"kms:UpdateKeyDescription",
"kms:ListRetirableGrants",
"kms:GetKeyPolicy",
"kms:GenerateDataKeyPairWithoutPlaintext",
"kms:ReEncryptFrom",
"kms:RetireGrant",
"kms:ListGrants",
"kms:UpdateAlias",
"kms:RevokeGrant",
"kms:GenerateDataKey",
"kms:CreateAlias"
],
"Resource": [
"arn:aws:kms:*:*:alias/*",
"arn:aws:kms:*:*:key/*"
]
},
{
"Sid": "KMSCreate",
"Effect": "Allow",
"Action": [
"kms:DescribeCustomKeyStores",
"kms:ListKeys",
"kms:GenerateRandom",
"kms:UpdateCustomKeyStore",
"kms:ListAliases",
"kms:CreateKey",
"kms:ConnectCustomKeyStore",
"kms:CreateCustomKeyStore"
],
"Resource": "*"
}
]
}
The following policies need to be attached to the assumed roles in order to back up each service:
#### RDS / RDS Aurora
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "RDSBackup",
"Effect": "Allow",
"Action": [
"rds:DescribeDBClusterSnapshotAttributes",
"rds:AddTagsToResource",
"rds:RestoreDBClusterFromSnapshot",
"rds:DescribeDBSnapshots",
"rds:DescribeGlobalClusters",
"rds:CopyDBSnapshot",
"rds:CopyDBClusterSnapshot",
"rds:DescribeDBSnapshotAttributes",
"rds:ModifyDBSnapshot",
"rds:ListTagsForResource",
"rds:CreateDBSnapshot",
"rds:DescribeDBClusterSnapshots",
"rds:DescribeDBInstances",
"rds:CreateDBClusterSnapshot",
"rds:ModifyDBClusterSnapshotAttribute",
"rds:ModifyDBSnapshotAttribute",
"rds:DescribeDBClusters",
"rds:DeleteDBSnapshot"
],
"Resource": "*"
}
]
}
#### S3
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "S3RW",
"Effect": "Allow",
"Action": [
"s3:ListBucketMultipartUploads",
"s3:GetObjectRetention",
"s3:GetObjectVersionTagging",
"s3:ListBucketVersions",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketVersioning",
"s3:GetBucketAcl",
"s3:GetObjectAcl",
"s3:GetObject",
"s3:GetEncryptionConfiguration",
"s3:ListAllMyBuckets",
"s3:PutLifecycleConfiguration",
"s3:GetObjectVersionAcl",
"s3:GetObjectTagging",
"s3:GetObjectVersionForReplication",
"s3:HeadBucket",
"s3:GetBucketLocation",
"s3:PutBucketVersioning",
"s3:GetObjectVersion",
"s3:PutObject",
"s3:PutObjectAcl",
"s3:PutEncryptionConfiguration",
"s3:PutBucketPolicy"
],
"Resource": "*"
}
]
}
## Container
Limited functionality for interacting with EKS and ECR. At the moment it's just getting a docker login via an assumed role to another assumed role:
......
......@@ -28,3 +28,27 @@ class AWS_Client():
client_options['aws_session_token'] = credentials['Credentials']['SessionToken']
return boto3.client(service, **client_options)
def create_resource(self, service, region, role_arn, valid_for=None):
"""
Takes service, region, role_arn, and optionally valid_for (duration in seconds),
and returns a boto3 resource for that service, using that role_arn (with assume role)
"""
client_options = {
'region_name': region
}
sts_client = boto3.client('sts', region_name=region)
credentials = sts_client.assume_role(
RoleArn=role_arn,
RoleSessionName="akinaka-{}".format(strftime("%Y%m%d%H%M%S", gmtime())),
DurationSeconds=valid_for or 900
)
client_options['aws_access_key_id'] = credentials['Credentials']['AccessKeyId']
client_options['aws_secret_access_key'] = credentials['Credentials']['SecretAccessKey']
client_options['aws_session_token'] = credentials['Credentials']['SessionToken']
return boto3.resource(service, **client_options)
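# Usage sketch (not part of this change set): building a boto3 *resource* with an
# assumed role, from a hypothetical caller; the role ARN below is a placeholder.
#
#   s3 = AWS_Client().create_resource('s3', 'eu-west-1', 'arn:aws:iam::123456789012:role/example-role')
#   for bucket in s3.buckets.all():
#       print(bucket.name)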
......@@ -4,8 +4,6 @@ from akinaka.libs import helpers, scan_resources_storage
from akinaka.libs import helpers, kms_share
from time import gmtime, strftime
import logging
import pprint
import boto3
aws_client = AWS_Client()
helpers.set_logger()
......@@ -73,11 +71,11 @@ def create_kms_key(region, assumable_role_arn):
@dr.command()
@click.pass_context
@click.option("--take-snapshot", is_flag=True, help="TODO: Boolean, default false. Take a live snapshot now, or take the existing latest snapshot")
@click.option("--db-names", required=False, help="Comma separated list of DB names to transfer")
@click.option("--names", required=False, help="Comma separated list of DB/S3 names to transfer")
@click.option("--service", type=click.Choice(['rds', 'aurora', 's3']), required=False, help="The service to transfer backups for. Defaults to all (RDS, S3)")
@click.option("--retention", required=False, help="Number of days of backups to keep")
@click.option("--rotate", is_flag=True, required=False, help="Only rotate backups so [retention] number of days is kep, don't do any actual backups")
def transfer(ctx, take_snapshot, db_names, service, retention, rotate):
def transfer(ctx, take_snapshot, names, service, retention, rotate):
"""
Backup [service] from owning account of [ctx.source_role_arn] to owning account
of [ctx.destination_role_arn].
......@@ -97,11 +95,11 @@ def transfer(ctx, take_snapshot, db_names, service, retention, rotate):
destination_kms_key = create_kms_key(region, destination_role_arn)
if service == 'rds':
if db_names:
db_names = [db_names.replace(' ','')]
if names:
db_names = [names.replace(' ','')]
else:
scanner = scan_resources_storage.ScanResources(region, source_role_arn)
db_names = db_names or scanner.scan_rds_instances()['db_names']
db_names = scanner.scan_rds_instances()['db_names']
rds(
dry_run,
......@@ -118,11 +116,11 @@ def transfer(ctx, take_snapshot, db_names, service, retention, rotate):
rotate)
if service == 'aurora':
if db_names:
db_names = [db_names.replace(' ','')]
if names:
db_names = [names.replace(' ','')]
else:
scanner = scan_resources_storage.ScanResources(region, source_role_arn)
db_names = db_names or scanner.scan_rds_aurora()['aurora_names']
db_names = scanner.scan_rds_aurora()['aurora_names']
rds(
dry_run,
......@@ -139,7 +137,53 @@ def transfer(ctx, take_snapshot, db_names, service, retention, rotate):
rotate)
if service == 's3':
logging.info('TODO')
if names:
names = [names.replace(' ','')]
else:
scanner = scan_resources_storage.ScanResources(region, source_role_arn)
names = scanner.scan_s3()['s3_names']
s3(
dry_run,
region,
source_role_arn,
destination_role_arn,
names,
source_kms_key,
destination_kms_key,
retention
)
def s3(
dry_run,
region,
source_role_arn,
destination_role_arn,
names,
source_kms_key,
destination_kms_key,
retention):
""" Call the S3 class to make backups of S3 buckets """
logging.info("Will attempt to backup the following S3 buckets, unless this is a dry run:")
logging.info(names)
if dry_run:
exit(0)
# --retention arrives as a string from click; default to 7 days
retention = int(retention or 7)
from .s3 import transfer_s3
s3 = transfer_s3.TransferS3(
region=region,
source_role_arn=source_role_arn,
destination_role_arn=destination_role_arn,
source_kms_key=source_kms_key,
destination_kms_key=destination_kms_key,
retention=retention
)
s3.main(names)
def rds(
dry_run,
......
"""
main() will:
1. Create a backup bucket in the backup account
2. Set a lifecycle policy so that noncurrent object versions are expired after [self.retention] days in the backup bucket
3. Set policies for the source bucket so that the backup account can get objects from it
4. Set policies for the backup bucket so that the backup account can use s3:PutBucketEncryption
5. Set an encryption policy to use [self.destination_kms_key]
6. Sync the source bucket to the backup bucket
"""
#!/usr/bin/env python3
from datetime import datetime
from akinaka.client.aws_client import AWS_Client
from akinaka.libs import helpers, exceptions
import logging
helpers.set_logger()
aws_client = AWS_Client()
class TransferS3():
def __init__(
self,
region,
source_role_arn,
destination_role_arn,
source_kms_key,
destination_kms_key,
retention):
self.region = region
self.source_role_arn = source_role_arn
self.destination_role_arn = destination_role_arn
self.source_kms_key = source_kms_key
self.destination_kms_key = destination_kms_key
self.retention = retention
def main(self, old_bucket_names):
"""
1. Create a bucket named [old_bucket_name]-backup with versioning
2. Set lifecycle configuration for objects in the bucket
3. Sync objects from the source buckets to the new bucket
"""
for old_bucket_name in old_bucket_names:
new_bucket_name = "{}-backup".format(old_bucket_name)
logging.info("Will create a backup bucket in the backup account if necessary")
new_bucket_name = self.create_backup_bucket(new_bucket_name, self.destination_role_arn)
self.set_bucket_lifecycle(new_bucket_name, self.retention)
self.set_bucket_policy(
bucket=old_bucket_name,
granter_role_arn=self.source_role_arn,
grantee_role_arn=self.destination_role_arn
)
self.set_bucket_policy(
bucket=new_bucket_name,
granter_role_arn=self.destination_role_arn,
grantee_role_arn=self.destination_role_arn
)
self.set_bucket_encryption(new_bucket_name, self.destination_kms_key)
self.sync_bucket(old_bucket_name, new_bucket_name, self.destination_kms_key)
def create_backup_bucket(self, new_bucket_name, role_arn):
"""
Create a bucket named [name]-backup to store the backup objects in. Returns
the name of the new bucket
"""
destination_s3_client = aws_client.create_client('s3', self.region, role_arn)
try:
destination_s3_client.create_bucket(
ACL='private',
Bucket=new_bucket_name,
CreateBucketConfiguration={
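# 'EU' is the legacy S3 location constraint, equivalent to eu-west-1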
'LocationConstraint': 'EU'
}
)
logging.info("Created the versioned bucket {}".format(new_bucket_name))
except destination_s3_client.exceptions.BucketAlreadyOwnedByYou:
logging.info("No need to create {}, as it already exists and we own it".format(new_bucket_name))
destination_s3_client.put_bucket_versioning(
Bucket=new_bucket_name,
VersioningConfiguration= {
'Status': 'Enabled'
}
)
logging.info("Successfully applied versioning to the bucket")
return new_bucket_name
def set_bucket_lifecycle(self, name, retention):
"""
Set the lifecycle policy for the versioned objects in bucket [name] to [retention] days
"""
destination_s3_client = aws_client.create_client('s3', self.region, self.destination_role_arn)
destination_s3_client.put_bucket_lifecycle_configuration(
Bucket=name,
LifecycleConfiguration={
'Rules': [
{
'ID': 'Akinaka',
'Prefix': '',
'Status': 'Enabled',
'NoncurrentVersionExpiration': {
'NoncurrentDays': retention
}
},
]
}
)
logging.info("Set a lifecycle policy to keep only {} " \
"versions of objects, for the destination bucket".format(retention))
def set_bucket_policy(self, bucket, granter_role_arn, grantee_role_arn):
"""
Set a policy on [bucket] such that the account of [grantee_role_arn] can perform
operations on its objects (the policy grants all object actions to that account).
Uses [granter_role_arn] to make the call, since
that is the only account which already has access to make this kind of change to
a bucket policy
"""
sts_client = aws_client.create_client('sts', self.region, grantee_role_arn)
account = sts_client.get_caller_identity()['Account']
source_s3_client = aws_client.create_client('s3', self.region, granter_role_arn)
source_s3_client.put_bucket_policy(
Bucket=bucket,
ConfirmRemoveSelfBucketAccess=True,
Policy="""{
"Id": "Policy1576178812268",
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1576178805544",
"Action": "*",
"Effect": "Allow",
"Resource": "arn:aws:s3:::%s/*",
"Principal": {
"AWS": [
"arn:aws:iam::%s:root"
]
}
}
]
}""" % (bucket, account)
)
logging.info('Successfully set a bucket policy so that account {} '\
'can perform operations on bucket {}'.format(account, bucket))
def set_bucket_encryption(self, bucket, kms_key):
"""
Set the encryption options on [bucket] to be enabled and use [kms_key]
"""
destination_s3_client = aws_client.create_client('s3', self.region, self.destination_role_arn)
destination_s3_client.put_bucket_encryption(
Bucket=bucket,
ServerSideEncryptionConfiguration={
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'aws:kms',
'KMSMasterKeyID': kms_key['KeyMetadata']['KeyId']
}
},
]
}
)
logging.info("Successfully set encryption on the bucket")
def sync_bucket(self, source_bucket, destination_bucket, kms_key):
"""
Sync objects from [source_bucket] to [destination_bucket], ensuring all objects
are encrypted with [kms_key]
"""
source_s3_client = aws_client.create_client('s3', self.region, self.source_role_arn)
destination_s3_client = aws_client.create_client('s3', self.region, self.destination_role_arn)
try:
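# list_objects returns at most 1000 keys per call; buckets with more objects
# would need pagination (e.g. a boto3 paginator)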
source_objects = source_s3_client.list_objects(Bucket=source_bucket)['Contents']
except KeyError:
logging.error("Failed to get a listing for objects for the bucket, " \
"probably because there were no objects to sync")
return
for obj in source_objects:
copy_source = {
'Bucket': source_bucket,
'Key': obj['Key']
}
destination_s3_client.copy_object(
ACL='private',
Bucket=destination_bucket,
CopySource=copy_source,
Key=obj['Key'],
ServerSideEncryption='aws:kms',
SSEKMSKeyId=kms_key['KeyMetadata']['KeyId'],
)
logging.info("Synced object {}".format(obj['Key']))
......@@ -68,6 +68,5 @@ class ScanResources():
s3_client = aws_client.create_client('s3', self.region, self.role_arn)
names = [bucket['Name'] for bucket in s3_client.list_buckets()['Buckets']]
arns = [ "arn:aws:s3:::" + name for name in names ]
return { 's3_arns': arns }
return { 's3_names': names }