Commit 5bf9f707 authored by Afraz Ahmadzadeh's avatar Afraz Ahmadzadeh

Fix working out asg with --lb

get_lb_target_group_arn() should only return a single target group, not a list
parent 9c50513b
......@@ -9,3 +9,5 @@
/*.egg
/build/*
.vscode
new_asg.txt
......@@ -18,6 +18,95 @@ At the moment it only does three things; blue/green deploys for plugging into Gi
pip3 install akinaka
## Requirements and Presumptions
Format of ASG names: "whatever-you-like**-blue/green**" — the part in bold is necessary, i.e. you must have two ASGs, one ending with "-blue" and one ending with "-green".
The following permissions are necessary for the IAM role / user that will be running Akinaka:
sts:AssumeRole
The following permissions are necessary for the IAM role that the above role /user will be assuming, if you wish to use every single feature:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "2018121701",
"Effect": "Allow",
"Action": [
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DescribeInstances",
"ec2:CreateKeyPair",
"ec2:CreateImage",
"ec2:CopyImage",
"ec2:DescribeSnapshots",
"elasticloadbalancing:DescribeLoadBalancers",
"ec2:DeleteVolume",
"ec2:ModifySnapshotAttribute",
"autoscaling:DescribeAutoScalingGroups",
"ec2:DescribeVolumes",
"ec2:DetachVolume",
"ec2:DescribeLaunchTemplates",
"ec2:CreateTags",
"ec2:RegisterImage",
"autoscaling:DetachLoadBalancerTargetGroups",
"ec2:RunInstances",
"ec2:StopInstances",
"ec2:CreateVolume",
"autoscaling:AttachLoadBalancerTargetGroups",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"ec2:GetPasswordData",
"elasticloadbalancing:DescribeTargetGroupAttributes",
"elasticloadbalancing:DescribeAccountLimits",
"ec2:DescribeImageAttribute",
"elasticloadbalancing:DescribeRules",
"ec2:DescribeSubnets",
"ec2:DeleteKeyPair",
"ec2:AttachVolume",
"autoscaling:DescribeAutoScalingInstances",
"ec2:DeregisterImage",
"ec2:DeleteSnapshot",
"ec2:DescribeRegions",
"ec2:ModifyImageAttribute",
"elasticloadbalancing:DescribeListeners",
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"elasticloadbalancing:DescribeListenerCertificates",
"ec2:ModifyInstanceAttribute",
"elasticloadbalancing:DescribeSSLPolicies",
"ec2:TerminateInstances",
"elasticloadbalancing:DescribeTags",
"ec2:DescribeTags",
"ec2:DescribeLaunchTemplateVersions",
"ec2:DescribeSecurityGroups",
"ec2:DescribeImages",
"ec2:DeleteSecurityGroup",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:DescribeTargetGroups"
],
"Resource": "*"
},
{
"Sid": "2018121702",
"Effect": "Allow",
"Action": [
"ssm:PutParameter",
"ssm:GetParameter",
"autoscaling:UpdateAutoScalingGroup",
"ec2:ModifyLaunchTemplate",
"ec2:CreateLaunchTemplateVersion",
"autoscaling:AttachLoadBalancerTargetGroups"
],
"Resource": [
"arn:aws:autoscaling:*:*:autoScalingGroup:*:autoScalingGroupName/*",
"arn:aws:ssm:eu-west-1:[YOUR_ACCOUNT]:parameter/deploying-status-*",
"arn:aws:ec2:*:*:launch-template/*"
]
}
]
}
## A Note on Role Assumption
Akinaka uses IAM roles to gain access into multiple accounts. Most commands require you to specify a list of roles you wish to perform a task for, and that role must have the [sts:AssumeRole](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_enable-create.html) permission. This is not only good security, it's helpful for ensuring you're doing things to the accounts you think you're doing things for ;)
......
......@@ -7,7 +7,10 @@ from datetime import timedelta, timezone, datetime
from time import strftime
import dateutil.parser
from akinaka_client.aws_client import AWS_Client
from akinaka_libs import helpers
import logging
helpers.set_logger()
aws_client = AWS_Client()
class CleanupAMIs():
......@@ -43,7 +46,7 @@ class CleanupAMIs():
amis_to_delete.add(ami['ImageId'])
# Same with list comprehension
# print([ami for ami in all_amis if dateutil.parser.parse(ami['CreationDate']) < self.retention_start])
# logging.info([ami for ami in all_amis if dateutil.parser.parse(ami['CreationDate']) < self.retention_start])
return amis_to_delete
def delist_in_use_amis(self, amis):
......@@ -119,10 +122,10 @@ class CleanupAMIs():
amis_to_delete = self.delist_launch_template_finds(amis_to_delete)
if self.not_dry_run:
print("Deleting the following AMIs and their snapshots: {}".format(amis_to_delete))
logging.info("Deleting the following AMIs and their snapshots: {}".format(amis_to_delete))
self.delete_amis(amis_to_delete)
else:
print("These are the AMIs I would have deleted if you gave me --not-dry-run: {}".format(amis_to_delete))
logging.info("These are the AMIs I would have deleted if you gave me --not-dry-run: {}".format(amis_to_delete))
import click
from akinaka_libs import helpers
import logging
helpers.set_logger()
@click.group()
@click.option("--region", required=True, help="Region your resources are located in")
......@@ -37,7 +41,7 @@ def ami(ctx, retention, not_dry_run, exceptional_amis, launch_templates):
amis.cleanup()
exit(0)
except Exception as e:
print(e)
logging.error(e)
exit(1)
@cleanup.command()
......@@ -54,7 +58,7 @@ def ebs(ctx):
volumes.cleanup()
exit(0)
except Exception as e:
print(e)
logging.error(e)
exit(1)
......
......@@ -7,7 +7,10 @@ from datetime import timedelta, timezone, datetime
from time import strftime
import dateutil.parser
from akinaka_client.aws_client import AWS_Client
from akinaka_libs import helpers
import logging
helpers.set_logger()
aws_client = AWS_Client()
class CleanupVolumes():
......@@ -39,16 +42,16 @@ class CleanupVolumes():
def cleanup(self):
for role in self.role_arns:
print("\nProcessing account: {}".format(role))
logging.error("\nProcessing account: {}".format(role))
volumes_to_delete = self.list_available_volumes(role)
if self.not_dry_run:
print("Deleting the following volumes and their snapshots: {}".format(volumes_to_delete))
logging.info("Deleting the following volumes and their snapshots: {}".format(volumes_to_delete))
self.delete_volumes(volumes_to_delete, role)
else:
print("These are the volumes I would have deleted if you gave me --not-dry-run:\n")
logging.info("These are the volumes I would have deleted if you gave me --not-dry-run:\n")
for volume in volumes_to_delete:
print("Volume: {}\n".format(volume['VolumeId']))
logging.info("Volume: {}\n".format(volume['VolumeId']))
from akinaka_libs import helpers
import logging
helpers.set_logger()
class AkinakaGeneralError(Exception):
    """Generic catch-all error for Akinaka operations."""
    pass
class AkinakaUpdateError(Exception):
    """Raised when a blue/green update operation fails."""
    pass
class AkinakaLoggingError(Exception):
    """Exception that also writes its message to the error log on creation."""

    def __init__(self, message=None):
        # Emit to the log first so the message is captured even if the
        # exception is later swallowed by a caller.
        logging.error(message)
        super().__init__(message)
class AkinakaCriticalException(Exception):
    """Fatal error: logs the message and then terminates the process.

    NOTE(review): exit(1) fires inside __init__, so merely *constructing*
    this exception (even before any `raise`) ends the program — confirm
    this is the intended contract for callers.
    """

    def __init__(self, message=None):
        # Log before exiting so the failure reason is recorded.
        logging.error(message)
        super().__init__(message)
        exit(1)
\ No newline at end of file
from datetime import timedelta, timezone, datetime
from time import strftime
import pprint
import logging
# Take a duration in seconds and work out the datetime value for the datetime at that date and time ago
def datetime_this_seconds_ago(duration):
......@@ -7,3 +9,9 @@ def datetime_this_seconds_ago(duration):
def seconds_from_hours(hours):
    """Convert a duration expressed in hours into seconds."""
    return hours * 3600
def log(message):
    # Legacy plain-stdout logger; the rest of the codebase is migrating to
    # the `logging` module configured via set_logger() below.
    print(message)
def set_logger():
    """Configure the root logger with a timestamped, levelled format.

    logging.basicConfig() is a no-op if the root logger already has
    handlers, so it is safe for every module to call this at import time.

    Returns:
        None. (The previous version returned basicConfig()'s result, which
        is always None — returning it implied a meaningful value.)
    """
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S%z'
    )
import click
from akinaka_libs import helpers
import logging
helpers.set_logger()
@click.group()
@click.option("--region", envvar='AWS_DEFAULT_REGION', help="Region your resources are located in")
......@@ -30,5 +34,5 @@ def rds(ctx, source_role_arn, target_role_arn, snapshot_style, source_instance_n
rds_copy.copy_instance()
exit(0)
except Exception as e:
print(e)
logging.error(e)
exit(1)
......@@ -5,7 +5,10 @@ import boto3
import time
import datetime
import sys
from akinaka_libs import helpers
import logging
helpers.set_logger()
aws_client = AWS_Client()
class CopyRDS():
......@@ -21,7 +24,7 @@ class CopyRDS():
self.target_instance_name = target_instance_name
def copy_instance(self):
logging.info("Starting RDS copy...")
rds_source_client = aws_client.create_client('rds', self.region, self.source_role_arn, 5400)
rds_target_client = aws_client.create_client('rds', self.region, self.target_role_arn, 5400)
kms_client = aws_client.create_client('kms', self.region, self.source_role_arn, 5400)
......@@ -64,27 +67,27 @@ class CopyRDS():
self.modify_rds_instance_security_groups(rds_client=rds_target_client, instancename=self.target_instance_name, securitygroup=self.target_security_group)
print(" Finished, check instance {}!".format(self.target_instance_name))
logging.info("Finished, check instance {}!".format(self.target_instance_name))
def get_kms_key(self, kms_client, source_account, target_account, target_account_arn):
key_alias = 'alias/RDSBackupRestoreSharedKeyWith{}'.format(target_account)
print("Searching for Customer Managed KMS Key with alias {} that is already shared with account {}...".format(key_alias, target_account))
logging.info("Searching for Customer Managed KMS Key with alias {} that is already shared with account {}...".format(key_alias, target_account))
# try to retrieve the KMS key with the specified alias to see if it exists
try:
key = kms_client.describe_key(KeyId=key_alias)
print(" Found key: {}".format(key['KeyMetadata']['Arn']))
logging.info("Found key: {}".format(key['KeyMetadata']['Arn']))
return key
except kms_client.exceptions.NotFoundException:
# if it doesn't exist, create it
print(" No valid key found.")
logging.error("No valid key found.")
key = self.create_shared_kms_key(kms_client, source_account, target_account, target_account_arn, key_alias)
return key
def create_shared_kms_key(self, kms_client, source_account, target_account, target_account_arn, key_alias):
print("Creating Customer Managed KMS Key that is shared...")
logging.info("Creating Customer Managed KMS Key that is shared...")
# create a Customer Managed KMS key, needed to be able to share the encrypted snapshot
kms_key = kms_client.create_key(
......@@ -123,7 +126,7 @@ class CopyRDS():
TargetKeyId=kms_key['KeyMetadata']['Arn']
)
print("Created KMS Key {}, shared with account {}".format(kms_key['KeyMetadata']['Arn'], target_account_arn))
logging.info("Created KMS Key {}, shared with account {}".format(kms_key['KeyMetadata']['Arn'], target_account_arn))
return kms_key
def copy_shared_snapshot_to_local(self, rds_client, shared_snapshot, kms_key):
......@@ -132,7 +135,7 @@ class CopyRDS():
# account where we want to restore the RDS instance
target_db_snapshot_id = "{}-copy".format(shared_snapshot['DBSnapshotIdentifier'])
print("Copying shared snaphot {} to local snapshot {}...".format(shared_snapshot['DBSnapshotArn'], target_db_snapshot_id))
logging.info("Copying shared snaphot {} to local snapshot {}...".format(shared_snapshot['DBSnapshotArn'], target_db_snapshot_id))
try:
copy = rds_client.copy_db_snapshot(
......@@ -140,48 +143,47 @@ class CopyRDS():
TargetDBSnapshotIdentifier=target_db_snapshot_id,
KmsKeyId=kms_key['KeyMetadata']['Arn']
)
print(" Copy created.")
logging.info("Copy created.")
return copy['DBSnapshot']
except rds_client.exceptions.DBSnapshotAlreadyExistsFault:
# if the snapshot we tried to make already exists, retrieve it
print("Snapshot already exists, retrieving {}...".format(target_db_snapshot_id))
logging.info("Snapshot already exists, retrieving {}...".format(target_db_snapshot_id))
snapshots = rds_client.describe_db_snapshots(
DBSnapshotIdentifier=target_db_snapshot_id,
)
print(" Retrieved.")
logging.info("Retrieved.")
return snapshots['DBSnapshots'][0]
def share_snapshot_with_external_account(self, rds_client, snapshot, target_account):
# in order to restore a snapshot from another account it needs to be shared
# with that account first
print("Modifying snaphot {} to be shared with account {}...".format(snapshot['DBSnapshotArn'], target_account))
logging.info("Modifying snaphot {} to be shared with account {}...".format(snapshot['DBSnapshotArn'], target_account))
rds_client.modify_db_snapshot_attribute(
DBSnapshotIdentifier=snapshot['DBSnapshotIdentifier'],
AttributeName='restore',
ValuesToAdd=[target_account]
)
print(" Modified.")
logging.info("Modified.")
def rename_or_delete_target_instance(self, rds_client, instancename, overwrite_target ):
print("Checking for an existing RDS instance by the name {} and renaming or deleting if it's found".format(instancename))
logging.info("Checking for an existing RDS instance by the name {} and renaming or deleting if it's found".format(instancename))
# check if we already have an instance by this name
try:
instance = rds_client.describe_db_instances(DBInstanceIdentifier=instancename)['DBInstances'][0]
print(" Instance found")
logging.info("Instance found")
except rds_client.exceptions.DBInstanceNotFoundFault:
instance = None
print(" Instance not found")
logging.info("Instance not found")
if instance is not None:
if overwrite_target:
print(" Instance found and overwrite if found True, deleting instance")
logging.info("Instance found and overwrite if found True, deleting instance")
rds_client.delete_db_instance(
DBInstanceIdentifier=instancename,
SkipFinalSnapshot=True
)
print(" Deleting instance. This will take a while...")
logging.info("Deleting instance. This will take a while...")
waiter = rds_client.get_waiter('db_instance_deleted')
waiter.wait(
DBInstanceIdentifier=instancename,
......@@ -189,9 +191,9 @@ class CopyRDS():
'MaxAttempts': 120
}
)
print(" Instance is deleted!")
logging.info("Instance is deleted!")
else:
print(" Instance found and renaming instance")
logging.info("Instance found and renaming instance")
try:
rds_client.modify_db_instance(
DBInstanceIdentifier=instancename,
......@@ -207,10 +209,10 @@ class CopyRDS():
while True:
instancecheck = rds_client.describe_db_instances(DBInstanceIdentifier=instance['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0]
if instancecheck['DBInstanceStatus'] == 'available':
print(" Instance {} ready and available!".format(instance['DBInstance']['DBInstanceIdentifier']))
logging.info("Instance {} ready and available!".format(instance['DBInstance']['DBInstanceIdentifier']))
break
else:
print("Instance creation in progress, sleeping 10 seconds...")
logging.info("Instance creation in progress, sleeping 10 seconds...")
time.sleep(10)
def wait_for_snapshot_to_be_ready(self, rds_client, snapshot):
......@@ -219,29 +221,28 @@ class CopyRDS():
while True:
snapshotcheck = rds_client.describe_db_snapshots(DBSnapshotIdentifier=snapshot['DBSnapshotIdentifier'])['DBSnapshots'][0]
if snapshotcheck['Status'] == 'available':
print(" Snapshot {} complete and available!".format(snapshot['DBSnapshotIdentifier']))
logging.info("Snapshot {} complete and available!".format(snapshot['DBSnapshotIdentifier']))
break
else:
print("Snapshot {} in progress, {}% complete".format(snapshot['DBSnapshotIdentifier'], snapshotcheck['PercentProgress']))
logging.info("Snapshot {} in progress, {}% complete".format(snapshot['DBSnapshotIdentifier'], snapshotcheck['PercentProgress']))
time.sleep(10)
def make_snapshot_from_running_instance(self, rds_client, source_instance_name):
print("Making a new snapshot from the running RDS instance")
logging.info("Making a new snapshot from the running RDS instance")
try:
today = datetime.date.today()
snapshot = rds_client.create_db_snapshot(
DBInstanceIdentifier=source_instance_name,
DBSnapshotIdentifier="{}-{:%Y-%m-%d}".format(source_instance_name, today),
)
print(" Snapshot created.")
logging.info("Snapshot created.")
return snapshot['DBSnapshot']
except Exception as exception:
print("ERROR: Failed to make snapshot from instance")
print(exception)
logging.error("Failed to make snapshot from instance: {}".format(exception))
sys.exit(1)
def get_latest_automatic_rds_snapshots(self, rds_client, source_instance_name):
print("Getting latest (automated) snapshot from rds instance {}...".format(source_instance_name))
logging.info("Getting latest (automated) snapshot from rds instance {}...".format(source_instance_name))
# we can't query for the latest snapshot straight away, so we have to retrieve
# a full list and go through all of them
snapshots = rds_client.describe_db_snapshots(
......@@ -256,7 +257,7 @@ class CopyRDS():
if snapshot['SnapshotCreateTime'] > latest['SnapshotCreateTime']:
latest = snapshot
print(" Found snapshot {}".format(latest['DBSnapshotIdentifier']))
logging.info("Found snapshot {}".format(latest['DBSnapshotIdentifier']))
return latest
def recrypt_snapshot_with_new_key(self, rds_client, snapshot, kms_key):
......@@ -266,7 +267,7 @@ class CopyRDS():
else:
target_db_snapshot_id = "{}-recrypted".format(snapshot['DBSnapshotIdentifier'])
print("Copying automatic snapshot to manual snapshot...")
logging.info("Copying automatic snapshot to manual snapshot...")
try:
# copy the snapshot, supplying the new KMS key (which is also shared with
......@@ -276,11 +277,11 @@ class CopyRDS():
TargetDBSnapshotIdentifier=target_db_snapshot_id,
KmsKeyId=kms_key['KeyMetadata']['Arn']
)
print(" Snapshot created.")
logging.info("Snapshot created.")
return copy['DBSnapshot']
except rds_client.exceptions.DBSnapshotAlreadyExistsFault:
# if the snapshot we tried to make already exists, retrieve it
print("Snapshot already exists, retrieving {}".format(target_db_snapshot_id))
logging.info("Snapshot already exists, retrieving {}".format(target_db_snapshot_id))
snapshots = rds_client.describe_db_snapshots(
DBSnapshotIdentifier=target_db_snapshot_id,
......@@ -290,7 +291,7 @@ class CopyRDS():
def create_rds_instance_from_snapshot(self, rds_client, snapshot, instancename, dbsubnet_group):
# restore an instance from the specified snapshot
print("Restoring RDS instance {} from snapshot {}".format(instancename, snapshot['DBSnapshotIdentifier']))
logging.info("Restoring RDS instance {} from snapshot {}".format(instancename, snapshot['DBSnapshotIdentifier']))
try:
if dbsubnet_group is None:
dbsubnet_group = 'default'
......@@ -300,15 +301,14 @@ class CopyRDS():
DBSnapshotIdentifier=snapshot['DBSnapshotArn'],
DBSubnetGroupName=dbsubnet_group,
)
print(" RDS instance restored.")
logging.info("RDS instance restored.")
return instance
except rds_client.exceptions.DBInstanceAlreadyExistsFault:
print("ERROR: an instance with the name {} already exists, please specify a different name or remove that instance first".format(instancename))
logging.error("An instance with the name {} already exists, please specify a different name or remove that instance first".format(instancename))
sys.exit(1)
def modify_rds_instance_security_groups(self, rds_client, instancename, securitygroup):
print("Modifying RDS instance to attach correct securitygroup")
logging.info("Modifying RDS instance to attach correct securitygroup")
try:
rds_client.modify_db_instance(
DBInstanceIdentifier=instancename,
......@@ -317,6 +317,7 @@ class CopyRDS():
],
ApplyImmediately=True
)
print(" RDS Instance {} modified".format(instancename))
logging.info("RDS Instance {} modified".format(instancename))
except Exception as e:
logging.error("{}".format(e))
raise
......@@ -5,6 +5,10 @@ import time
import datetime
import sys
import tabulate
from akinaka_libs import helpers
import logging
helpers.set_logger()
class BillingQueries():
def __init__(self, region, assume_role_arn):
......@@ -15,7 +19,7 @@ class BillingQueries():
response = self.costexplorer.get_bill_estimates(from_days_ago)
data = response['ResultsByTime']
except Exception as e:
print("Billing estimates is not available: {}".format(e))
logging.error("Billing estimates is not available: {}".format(e))
return e
results = []
......@@ -35,5 +39,5 @@ class BillingQueries():
message += tabulate.tabulate(results, headers=["Date", "Total"], tablefmt='psql')
message += "\n"
print(message)
return message
\ No newline at end of file
logging.info(message)
return
\ No newline at end of file
#!/usr/bin/env python3
from time import sleep
from akinaka_libs import helpers
from akinaka_libs import exceptions
from akinaka_client.aws_client import AWS_Client
import logging
aws_client = AWS_Client()
class ASG():
def __init__(self, ami, region, role_arn, loadbalancer=None, asg=None, target_group=None):
"""All the methods needed to perform a blue/green deploy"""
def __init__(self, ami, region, role_arn, loadbalancer=None, asg=None, target_group=None, scale_to=None):
self.loadbalancer = loadbalancer
self.ami = ami
self.region = region
self.role_arn = role_arn
self.asg = asg
self.target_group = target_group
self.scale_to = scale_to if scale_to else 1
def do_update(self):
target_groups = None
new_ami = self.ami
def return_application_name(self):
"""
Uses the classes self.targetgroup or self.loadbalancer arguments (whichever is available)
to return a string denoting the application attached to that argument
"""
if self.loadbalancer:
target_group_arn = self.get_lb_target_group_arn()
elif self.target_group:
target_group_arn = self.get_target_group_arn(self.target_group)
else:
raise exceptions.AkinakaCriticalException("Couldn't get name of the application we're updating")
active_asg = self.get_active_asg(target_group_arn)
asg_split = active_asg.split('-')[0:-1]
return '-'.join(asg_split)
def work_out_new_asg(self):
if self.asg is not None:
new_asg = self.asg
logging.info("We've been given the ASG name as an argument")
return self.asg
elif self.loadbalancer is not None and self.target_group is None:
target_groups = self.get_lb_target_groups()
target_group_arn = self.get_lb_target_group_arn()
elif self.loadbalancer is None and self.target_group is not None:
target_groups = [self.get_target_group_arn(self.target_group)]
target_group_arn = self.get_target_group_arn(self.target_group)
else:
print("""
One of these mutually exclusive options need to be passed:
--lb
--asg
--target-groups
""")
exit(1)
if target_groups is not None:
these_current_asg_instances = self.current_asg_instances(target_groups)
new_asg = self.get_inactive_asg(these_current_asg_instances)
raise exceptions.AkinakaCriticalException("Pass either --lb, --asg, or --lt")
active_asg = self.get_active_asg(target_group_arn)
new_asg = self.get_inactive_asg(active_asg)
try:
self.update_launch_template(new_asg, new_ami, self.get_lt_name(new_asg))
self.update_asg(new_asg, 1, 1, 1)
open("new_asg.txt", "w").write(new_asg)
except Exception as e:
print("Didn't update the ASG {}, because: {}".format(new_asg, e))
exit(1)
return new_asg