Commit b57c26ad authored by Sebastian Rieger

added AWS ELB load balancer example

parent d43d1b39
@@ -66,7 +66,10 @@ def main():
     #
     ###########################################################################
+    print("Fetching images (AMI) list from AWS region. This will take a lot of seconds (AWS has a very long list of "
+          "supported operating systems and versions)... please be patient...")
     images = conn.list_images()
     # image = ''
     # for img in images:
     # # if img.name == ubuntu_image_name:
@@ -74,6 +77,8 @@ def main():
     # print(img)
     # if img.id == ubuntu_image_name:
     # image = img
+    # fetch/select the image referenced with ubuntu_image_name above
+    image = [i for i in images if i.name == ubuntu_image_name][0]
     print(image)
# import getpass
# import os
# import libcloud.security
import time
from libcloud.compute.base import NodeImage
from libcloud.compute.base import NodeState
from libcloud.compute.providers import get_driver as compute_get_driver
from libcloud.compute.types import Provider as compute_Provider
from libcloud.loadbalancer.base import Member, Algorithm
from libcloud.loadbalancer.types import Provider as loadbalancer_Provider
from libcloud.loadbalancer.providers import get_driver as loadbalancer_get_driver
# reqs:
# services: EC2 and ELB (the AWS counterparts to nova, glance, neutron in the related OpenStack examples)
# resources: cleans up the demo instances, security groups and the load balancer created by the other examples
# The image to look for and use for the started instance
# ubuntu_image_name = 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20200408'
ubuntu_image_id = "ami-085925f297f89fce1"  # region-local AMI id for recent Ubuntu 18.04 (20200408) in us-east-1
# The public key to be used for the SSH connection, please make sure that you have the corresponding private key
#
# id_rsa.pub should look like this (standard sshd pubkey format):
# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME
keypair_name = 'srieger-pub'
pub_key_file = '~/.ssh/id_rsa.pub'
flavor_name = 't2.nano'
# default region
# region_name = 'eu-central-1'
# region_name = 'ap-south-1'
# AWS Educate only allows us-east-1, see our AWS classroom at https://www.awseducate.com
# e.g., https://www.awseducate.com/student/s/launch-classroom?classroomId=a1v3m000005mNm6AAE
region_name = 'us-east-1'
def main():
###########################################################################
#
# get credentials
#
###########################################################################
# see AWS Educate classroom, Account Details
# access_id = getpass.win_getpass("Enter your access_id:")
# secret_key = getpass.win_getpass("Enter your secret_key:")
# session_token = getpass.win_getpass("Enter your session_token:")
# access_id = "ASIAU..."
# secret_key = "7lafW..."
# session_token = "IQoJb3JpZ...EMb//..."
access_id = "ASIA5ML7..."
secret_key = "76lAjn..."
session_token = "IQoJb3JpZ2luX2VjEBc..."
###########################################################################
#
# delete load balancer (Amazon AWS ELB)
#
###########################################################################
elb_provider = loadbalancer_get_driver(loadbalancer_Provider.ELB)
elb_conn = elb_provider(access_id,
secret_key,
token=session_token,
region=region_name)
print("Deleting previously created load balancers in: " + str(elb_conn.list_balancers()))
for loadbalancer in elb_conn.list_balancers():
if loadbalancer.name == "lb1":
elb_conn.destroy_balancer(loadbalancer)
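# optional sketch (not part of the original example): destroy_balancer() returns a boolean in
# libcloud's base load balancer API, so failed deletions could be surfaced explicitly, e.g.:
# for loadbalancer in elb_conn.list_balancers():
#     if loadbalancer.name == "lb1" and not elb_conn.destroy_balancer(loadbalancer):
#         print('Could not delete load balancer: %s' % loadbalancer.name)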
###########################################################################
#
# create EC2 connection
#
###########################################################################
provider = compute_get_driver(compute_Provider.EC2)
conn = provider(key=access_id,
secret=secret_key,
token=session_token,
region=region_name)
###########################################################################
#
# clean up resources from previous demos
#
###########################################################################
# destroy running demo instances
for instance in conn.list_nodes():
if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller',
'app-services', 'app-api-1', 'app-api-2']:
if instance.state is not NodeState.TERMINATED:
print('Destroying Instance: %s' % instance.name)
conn.destroy_node(instance)
# wait until all nodes are destroyed so that the dependent security groups can be removed
nodes_still_running = True
while nodes_still_running:
nodes_still_running = False
time.sleep(3)
instances = conn.list_nodes()
for instance in instances:
# if we see any demo instances still running, continue to wait for them to stop
if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', 'app-services', 'app-api-1', 'app-api-2']:
if instance.state is not NodeState.TERMINATED:
nodes_still_running = True
print('There are still instances running, waiting for them to be destroyed...')
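# optional sketch (not part of the original example): the loop above polls until the demo
# instances report TERMINATED; a bounded variant could stop waiting after a deadline, e.g.:
# deadline = time.time() + 300  # give up after 5 minutes
# while nodes_still_running and time.time() < deadline:
#     time.sleep(3)
#     nodes_still_running = any(
#         n.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-controller', 'app-services']
#         and n.state is not NodeState.TERMINATED
#         for n in conn.list_nodes())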
# delete security groups, respecting dependencies (hence deleting 'control' and 'services' first)
for group in conn.ex_list_security_groups():
if group in ['control', 'services']:
print('Deleting security group: %s' % group)
conn.ex_delete_security_group(group)
# now we can delete security groups 'api' and 'worker'; as long as 'control' and 'services' still referenced them,
# AWS would throw DependencyViolation: resource has a dependent object
for group in conn.ex_list_security_groups():
if group in ['api', 'worker']:
print('Deleting security group: %s' % group)
conn.ex_delete_security_group(group)
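# note: the two deletion passes above matter because the 'control' and 'services' groups contain
# rules that reference 'worker' and 'api' via group_pairs; AWS only deletes a security group once
# no other group's rules reference it, otherwise it answers with DependencyViolation.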
if __name__ == '__main__':
main()
# import getpass
# import os
# import libcloud.security
import time
from libcloud.compute.base import NodeImage
from libcloud.compute.base import NodeState
from libcloud.compute.providers import get_driver as compute_get_driver
from libcloud.compute.types import Provider as compute_Provider
from libcloud.loadbalancer.base import Member, Algorithm
from libcloud.loadbalancer.types import Provider as loadbalancer_Provider
from libcloud.loadbalancer.providers import get_driver as loadbalancer_get_driver
# reqs:
# services: EC2 (the AWS counterpart to nova, glance, neutron in the related OpenStack examples), ELB
# resources: 5 instances, 1 keypair, 4 security groups, 1 elastic load balancer
# The image to look for and use for the started instance
# ubuntu_image_name = 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20200408'
ubuntu_image_id = "ami-085925f297f89fce1"  # region-local AMI id for recent Ubuntu 18.04 (20200408) in us-east-1
# The public key to be used for the SSH connection, please make sure that you have the corresponding private key
#
# id_rsa.pub should look like this (standard sshd pubkey format):
# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME
keypair_name = 'srieger-pub'
pub_key_file = '~/.ssh/id_rsa.pub'
flavor_name = 't2.nano'
# default region
# region_name = 'eu-central-1'
# region_name = 'ap-south-1'
# AWS Educate only allows us-east-1, see our AWS classroom at https://www.awseducate.com
# e.g., https://www.awseducate.com/student/s/launch-classroom?classroomId=a1v3m000005mNm6AAE
region_name = 'us-east-1'
def main():
###########################################################################
#
# get credentials
#
###########################################################################
# see AWS Educate classroom, Account Details
# access_id = getpass.win_getpass("Enter your access_id:")
# secret_key = getpass.win_getpass("Enter your secret_key:")
# session_token = getpass.win_getpass("Enter your session_token:")
# access_id = "ASIAU..."
# secret_key = "7lafW..."
# session_token = "IQoJb3JpZ...EMb//..."
access_id = "ASIA..."
secret_key = "76lAj..."
session_token = "IQoJ..."
###########################################################################
#
# create connection
#
###########################################################################
provider = compute_get_driver(compute_Provider.EC2)
conn = provider(key=access_id,
secret=secret_key,
token=session_token,
region=region_name)
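# note: the token parameter carries the AWS STS session token that belongs to temporary
# credentials (such as the ones handed out by AWS Educate); it is not needed when using
# the long-lived access keys of a regular IAM user.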
###########################################################################
#
# get image, flavor, network for instance creation
#
###########################################################################
# print("Fetching images (AMI) list from AWS region. This will take a lot of seconds (AWS has a very long list of "
# "supported operating systems and versions)... please be patient...")
# images = conn.list_images()
# fetch/select the image referenced with ubuntu_image_name above
# image = [i for i in images if i.name == ubuntu_image_name][0]
# print(image)
# selecting the image based on defined AMI id
image = NodeImage(id=ubuntu_image_id, name=None, driver=conn)
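# optional sketch (not part of the original example): constructing NodeImage directly avoids the
# slow list_images() call but does not verify that the AMI exists in the selected region; if
# validation is wanted, the EC2 driver's list_images() accepts an ex_image_ids filter
# (assumption: available in recent libcloud releases), e.g.:
# image = conn.list_images(ex_image_ids=[ubuntu_image_id])[0]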
flavors = conn.list_sizes()
flavor = [s for s in flavors if s.id == flavor_name][0]
print(flavor)
# networks = conn.ex_list_networks()
# network = ''
# for net in networks:
# if net.name == project_network:
# network = net
###########################################################################
#
# create keypair dependency
#
###########################################################################
print('Checking for existing SSH key pair...')
keypair_exists = False
for keypair in conn.list_key_pairs():
if keypair.name == keypair_name:
keypair_exists = True
if keypair_exists:
print('Keypair ' + keypair_name + ' already exists. Skipping import.')
else:
print('adding keypair...')
conn.import_key_pair_from_file(keypair_name, pub_key_file)
for keypair in conn.list_key_pairs():
print(keypair)
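# optional sketch (not part of the original example): instead of scanning list_key_pairs(),
# libcloud's compute API also offers get_key_pair(), which raises KeyPairDoesNotExistError
# when the key pair is missing, e.g.:
# from libcloud.compute.types import KeyPairDoesNotExistError
# try:
#     conn.get_key_pair(keypair_name)
# except KeyPairDoesNotExistError:
#     conn.import_key_pair_from_file(keypair_name, pub_key_file)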
###########################################################################
#
# clean up resources from previous demos
#
###########################################################################
# destroy running demo instances
for instance in conn.list_nodes():
if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller',
'app-services', 'app-api-1', 'app-api-2']:
if instance.state is not NodeState.TERMINATED:
print('Destroying Instance: %s' % instance.name)
conn.destroy_node(instance)
# wait until all nodes are destroyed so that the dependent security groups can be removed
nodes_still_running = True
while nodes_still_running:
nodes_still_running = False
time.sleep(3)
instances = conn.list_nodes()
for instance in instances:
# if we see any demo instances still running, continue to wait for them to stop
if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', 'app-services', 'app-api-1', 'app-api-2']:
if instance.state is not NodeState.TERMINATED:
nodes_still_running = True
print('There are still instances running, waiting for them to be destroyed...')
# delete security groups, respecting dependencies (hence deleting 'control' and 'services' first)
for group in conn.ex_list_security_groups():
if group in ['control', 'services']:
print('Deleting security group: %s' % group)
conn.ex_delete_security_group(group)
# now we can delete security groups 'api' and 'worker'; as long as 'control' and 'services' still referenced them,
# AWS would throw DependencyViolation: resource has a dependent object
for group in conn.ex_list_security_groups():
if group in ['api', 'worker']:
print('Deleting security group: %s' % group)
conn.ex_delete_security_group(group)
###########################################################################
#
# create security group dependency
#
###########################################################################
def get_security_group(connection, security_group_name):
"""A helper function to check if security group already exists"""
print('Checking for existing ' + security_group_name + ' security group...')
for security_grp in connection.ex_list_security_groups():
if security_grp == security_group_name:
print('Security Group ' + security_group_name + ' already exists. Skipping creation.')
return security_grp['group_id']
return False
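# note: depending on the libcloud version, ex_list_security_groups() on the EC2 driver returns
# plain group names (strings); in that case security_grp['group_id'] would not work and the id
# would have to be looked up separately (e.g. via ex_get_security_groups(group_names=[...])),
# so treat the helper above as a sketch for drivers that return dict-like entries.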
if not get_security_group(conn, "api"):
api_security_group_result = conn.ex_create_security_group('api', 'for API services only')
api_security_group_id = api_security_group_result['group_id']
conn.ex_authorize_security_group_ingress(api_security_group_id, 22, 22, cidr_ips=['0.0.0.0/0'],
protocol='tcp')
conn.ex_authorize_security_group_ingress(api_security_group_id, 80, 80, cidr_ips=['0.0.0.0/0'],
protocol='tcp')
else:
api_security_group_id = get_security_group(conn, "api")
if not get_security_group(conn, "worker"):
worker_security_group_result = conn.ex_create_security_group('worker', 'for services that run on a worker node')
worker_security_group_id = worker_security_group_result['group_id']
conn.ex_authorize_security_group_ingress(worker_security_group_id, 22, 22, cidr_ips=['0.0.0.0/0'],
protocol='tcp')
else:
worker_security_group_id = get_security_group(conn, "worker")
if not get_security_group(conn, "control"):
controller_security_group_result = conn.ex_create_security_group('control',
'for services that run on a control node')
controller_security_group_id = controller_security_group_result['group_id']
conn.ex_authorize_security_group_ingress(controller_security_group_id, 22, 22, cidr_ips=['0.0.0.0/0'],
protocol='tcp')
conn.ex_authorize_security_group_ingress(controller_security_group_id, 80, 80, cidr_ips=['0.0.0.0/0'],
protocol='tcp')
conn.ex_authorize_security_group_ingress(controller_security_group_id, 5672, 5672,
group_pairs=[{'group_id': worker_security_group_id}], protocol='tcp')
else:
controller_security_group_id = get_security_group(conn, "control")
if not get_security_group(conn, "services"):
services_security_group_result = conn.ex_create_security_group('services', 'for DB and AMQP services only')
services_security_group_id = services_security_group_result['group_id']
conn.ex_authorize_security_group_ingress(services_security_group_id, 22, 22, cidr_ips=['0.0.0.0/0'],
protocol='tcp')
conn.ex_authorize_security_group_ingress(services_security_group_id, 3306, 3306, cidr_ips=['0.0.0.0/0'],
group_pairs=[{'group_id': api_security_group_id}], protocol='tcp')
conn.ex_authorize_security_group_ingress(services_security_group_id, 5672, 5672,
group_pairs=[{'group_id': worker_security_group_id}], protocol='tcp')
conn.ex_authorize_security_group_ingress(services_security_group_id, 5672, 5672,
group_pairs=[{'group_id': api_security_group_id}], protocol='tcp')
else:
services_security_group_id = get_security_group(conn, "services")
for security_group in conn.ex_list_security_groups():
print(security_group)
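# note: the group_pairs arguments used above express AWS security group references: instead of
# opening a port to a CIDR range, such a rule admits traffic only from instances that are members
# of the referenced security group (e.g. AMQP 5672 from 'worker' and 'api' into 'services').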
###########################################################################
#
# create app-services instance (database & messaging) (Amazon AWS EC2)
#
###########################################################################
# https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh
# is currently broken, hence the "rabbitmqctl" lines were added in the example
# below, see also https://bugs.launchpad.net/faafo/+bug/1679710
#
# Thanks to Stefan Friedmann for finding this fix ;)
userdata_service = '''#!/usr/bin/env bash
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
-i database -i messaging
rabbitmqctl add_user faafo guest
rabbitmqctl set_user_tags faafo administrator
rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*"
'''
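# note: the userdata above is executed by cloud-init as root during the instance's first boot,
# so the script can install the database and messaging roles and create the RabbitMQ user
# without any manual SSH step.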
print('Starting new app-services instance and waiting until it is running...')
instance_services = conn.create_node(name='app-services',
image=image,
size=flavor,
ex_keyname=keypair_name,
ex_userdata=userdata_service,
ex_security_groups=["services"])
instance_services = conn.wait_until_running(nodes=[instance_services], timeout=120, ssh_interface='public_ips')
services_ip = instance_services[0][0].private_ips[0]
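# note: wait_until_running() returns a list of (node, ip_addresses) tuples, hence the [0][0]
# indexing to get the Node object back; the private IP is used because the other instances
# reach the database and message queue from inside the same VPC.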
###########################################################################
#
# create app-api instances (Amazon AWS EC2)
#
###########################################################################
userdata_api = '''#!/usr/bin/env bash
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
-i faafo -r api -m 'amqp://faafo:guest@%(services_ip)s:5672/' \
-d 'mysql+pymysql://faafo:password@%(services_ip)s:3306/faafo'
''' % {'services_ip': services_ip}
print('Starting new app-api-1 instance and waiting until it is running...')
instance_api_1 = conn.create_node(name='app-api-1',
image=image,
size=flavor,
ex_keyname=keypair_name,
ex_userdata=userdata_api,
ex_security_groups=["api"])
print('Starting new app-api-2 instance and waiting until it is running...')
instance_api_2 = conn.create_node(name='app-api-2',
image=image,
size=flavor,
ex_keyname=keypair_name,
ex_userdata=userdata_api,
ex_security_groups=["api"])
instance_api_1 = conn.wait_until_running(nodes=[instance_api_1], timeout=120, ssh_interface='public_ips')
api_1_ip = instance_api_1[0][0].private_ips[0]
instance_api_2 = conn.wait_until_running(nodes=[instance_api_2], timeout=120, ssh_interface='public_ips')
api_2_ip = instance_api_2[0][0].private_ips[0]
###########################################################################
#
# create worker instances (Amazon AWS EC2)
#
###########################################################################
userdata_worker = '''#!/usr/bin/env bash
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
-i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/'
''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip}
# userdata_worker_api_2 = '''#!/usr/bin/env bash
# curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
# -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/'
# ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip}
print('Starting new app-worker-1 instance and waiting until it is running...')
instance_worker_1 = conn.create_node(name='app-worker-1',
image=image, size=flavor,
ex_keyname=keypair_name,
ex_userdata=userdata_worker,
ex_security_groups=["worker"])
print('Starting new app-worker-2 instance and waiting until it is running...')
instance_worker_2 = conn.create_node(name='app-worker-2',
image=image, size=flavor,
ex_keyname=keypair_name,
ex_userdata=userdata_worker,
ex_security_groups=["worker"])
# do not start worker 3 initially; it can be started later using the scale-out-add-worker.py demo
# print('Starting new app-worker-3 instance and waiting until it is running...')
# instance_worker_3 = conn.create_node(name='app-worker-3',
# image=image, size=flavor,
# ex_keyname=keypair_name,
# ex_userdata=userdata_worker,
# ex_security_groups=["worker"])
print(instance_worker_1)
print(instance_worker_2)
# print(instance_worker_3)
###########################################################################
#
# create load balancer (Amazon AWS ELB)
#
###########################################################################
elb_provider = loadbalancer_get_driver(loadbalancer_Provider.ELB)
elb_conn = elb_provider(access_id,
secret_key,
token=session_token,
region=region_name)
print("Deleting previously created load balancers in: " + str(elb_conn.list_balancers()))
for loadbalancer in elb_conn.list_balancers():
if loadbalancer.name == "lb1":
elb_conn.destroy_balancer(loadbalancer)
# get the suffix (a, b, c, ...) of all availability zones available in the selected region
all_availability_zones_in_region = []
for az in conn.ex_list_availability_zones():
all_availability_zones_in_region.append(az.name[-1])
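# note: az.name looks like 'us-east-1a', so az.name[-1] yields just the zone suffix
# ('a', 'b', ...); these suffixes are passed below via ex_members_availability_zones to spread
# the classic ELB across all availability zones of the region.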
# create new load balancer
# example uses "classic" ELB with default HTTP health. monitor, you can see the result in the EC2 console, after
# running this script
new_load_balancer = elb_conn.create_balancer(
name='lb1',
algorithm=Algorithm.ROUND_ROBIN,
port=80,
protocol='http',
members=[],
ex_members_availability_zones=all_availability_zones_in_region)
# attach api instances as members to load balancer
elb_conn.balancer_attach_compute_node(balancer=new_load_balancer, node=instance_api_1[0][0])
elb_conn.balancer_attach_compute_node(balancer=new_load_balancer, node=instance_api_2[0][0])
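# optional sketch (not part of the original example): the registered members can be verified via
# the base load balancer API, e.g.:
# for member in elb_conn.balancer_list_members(new_load_balancer):
#     print(member)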
print("Created load balancer: " + str(new_load_balancer))
# wait for the load balancer to be ready
while new_load_balancer.state != 2:
time.sleep(3)
new_load_balancer = elb_conn.get_balancer(new_load_balancer.id)
print("\n\nYou can see the instances created in EC2 in AWS Console. You'll also find the load balancer under ELB "
"there.\n"
" You can access the faafo application deployed to the loadbalancer at: http://" + new_load_balancer.ip +
" as soon as instances are detected to be deployed and healthy by the load balancer.")
if __name__ == '__main__':
main()