Commit 0454b2a8 authored by Sebastian Rieger

faafo aws demo

parent 2441bb4f
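# ---------------------------------------------------------------------------
# First file in this commit: the script below destroys instances, security
# groups and Elastic IPs left over from previous runs of the faafo AWS demo.
# ---------------------------------------------------------------------------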
import getpass
# import os
# import libcloud.security
import time
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider, NodeState
# reqs:
# services: EC2 (nova, glance, neutron)
# resources: 2 instances (m1.small), 2 elastic ips (1 keypair, 2 security groups)
# default region
# region_name = 'eu-central-1'
region_name = 'us-east-2'
#region_name = 'ap-south-1'
def main():
    ###########################################################################
    #
    # get credentials
    #
    ###########################################################################

    # getpass.getpass() works on all platforms (getpass.win_getpass is Windows-only)
    access_id = getpass.getpass("Enter your access_id:")
    secret_key = getpass.getpass("Enter your secret_key:")

    # access_id = "AXY..."
    # secret_key = "j1zomy61..."
    ###########################################################################
    #
    # create connection
    #
    ###########################################################################

    provider = get_driver(Provider.EC2)
    conn = provider(access_id,
                    secret_key,
                    region=region_name)
    ###########################################################################
    #
    # clean up resources from previous demos
    #
    ###########################################################################

    # destroy running demo instances
    for instance in conn.list_nodes():
        if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller',
                             'app-services', 'app-api-1', 'app-api-2']:
            print('Destroying Instance: %s' % instance.name)
            conn.destroy_node(instance)

    # wait until all nodes are destroyed to be able to remove dependent security groups
    nodes_still_running = True
    while nodes_still_running:
        nodes_still_running = False
        time.sleep(3)
        instances = conn.list_nodes()
        for instance in instances:
            # if we see any demo instances still running, continue to wait for them to stop
            if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller',
                                 'app-services', 'app-api-1', 'app-api-2']:
                # compare against the NodeState constant instead of identity-comparing a string literal
                if instance.state != NodeState.TERMINATED:
                    nodes_still_running = True
                    print('There are still instances running, waiting for them to be destroyed...')
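    # note: AWS refuses to delete a security group that is still attached to an instance
    # (DependencyViolation), hence the wait for termination above before deleting the groups below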
    # delete security groups
    for group in conn.ex_list_security_groups():
        if group in ['control', 'worker', 'api', 'services']:
            print('Deleting security group: %s' % group)
            conn.ex_delete_security_group(group)

    # release elastic ips
    for elastic_ip in conn.ex_describe_all_addresses():
        if elastic_ip.instance_id is None:
            print('Releasing unused elastic ip %s' % elastic_ip)
            conn.ex_release_address(elastic_ip, domain=elastic_ip.domain)


if __name__ == '__main__':
    main()
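# ---------------------------------------------------------------------------
# Second file in this commit: the script below deploys the faafo demo
# (app-controller and app-worker-1) on AWS EC2 using Apache Libcloud.
# ---------------------------------------------------------------------------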
import getpass
# import os
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
# reqs:
# services: EC2 (nova, glance, neutron)
# resources: 2 instances, 2 elastic ips (1 keypair, 2 security groups)
# The image to look for and use for the started instance
ubuntu_image_name = 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20200408'
# ubuntu_image_id = "ami-0e342d72b12109f91"  # region-specific AMI id for recent Ubuntu 18.04 20200408 in eu-central-1

# The public key to be used for the SSH connection; please make sure that you have the corresponding private key
#
# id_rsa.pub should look like this (standard sshd pubkey format):
# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME
keypair_name = 'srieger-pub'
pub_key_file = '~/.ssh/id_rsa.pub'
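# if ~/.ssh/id_rsa.pub does not exist yet, a key pair can be generated locally, e.g. with "ssh-keygen -t rsa";
# only the public key is uploaded to AWS below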
flavor_name = 't2.nano'
# default region
region_name = 'eu-central-1'
# region_name = 'us-east-2'
#region_name = 'ap-south-1'
def main():
    ###########################################################################
    #
    # get credentials
    #
    ###########################################################################

    # getpass.getpass() works on all platforms (getpass.win_getpass is Windows-only)
    access_id = getpass.getpass("Enter your access_id:")
    secret_key = getpass.getpass("Enter your secret_key:")

    # access_id = "AXY..."
    # secret_key = "j1zomy61..."

    ###########################################################################
    #
    # create connection
    #
    ###########################################################################

    provider = get_driver(Provider.EC2)
    conn = provider(access_id,
                    secret_key,
                    region=region_name)
    ###########################################################################
    #
    # get image, flavor, network for instance creation
    #
    ###########################################################################

    images = conn.list_images()
    # image = ''
    # for img in images:
    #     # if img.name == ubuntu_image_name:
    #     if img.extra['owner_alias'] == 'amazon':
    #         print(img)
    #         if img.id == ubuntu_image_name:
    #             image = img
    image = [i for i in images if i.name == ubuntu_image_name][0]
    print(image)
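    # Note: conn.list_images() without filters retrieves metadata for the entire public AMI catalogue,
    # which can take quite a while. Depending on the installed libcloud version it may be possible to
    # narrow the query server-side, e.g. by passing ex_owner set to Canonical's AWS account ID; treat
    # the exact parameter as an assumption and check the EC2 driver documentation.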
    flavors = conn.list_sizes()
    flavor = [s for s in flavors if s.id == flavor_name][0]
    print(flavor)

    # networks = conn.ex_list_networks()
    # network = ''
    # for net in networks:
    #     if net.name == project_network:
    #         network = net
    ###########################################################################
    #
    # create keypair dependency
    #
    ###########################################################################

    print('Checking for existing SSH key pair...')
    keypair_exists = False
    for keypair in conn.list_key_pairs():
        if keypair.name == keypair_name:
            keypair_exists = True

    if keypair_exists:
        print('Keypair ' + keypair_name + ' already exists. Skipping import.')
    else:
        print('adding keypair...')
        conn.import_key_pair_from_file(keypair_name, pub_key_file)

    for keypair in conn.list_key_pairs():
        print(keypair)
    ###########################################################################
    #
    # create security group dependency
    #
    ###########################################################################

    print('Checking for existing worker security group...')
    worker_security_group_exists = False
    worker_security_group_name = 'worker'
    for security_group in conn.ex_get_security_groups():
        if security_group.name == worker_security_group_name:
            worker_security_group_id = security_group.id
            worker_security_group_exists = True

    if worker_security_group_exists:
        print('Worker Security Group ' + worker_security_group_name + ' already exists. Skipping creation.')
    else:
        worker_security_group_result = conn.ex_create_security_group('worker', 'for services that run on a worker node')
        worker_security_group_id = worker_security_group_result['group_id']
        conn.ex_authorize_security_group_ingress(worker_security_group_id, 22, 22, cidr_ips=['0.0.0.0/0'],
                                                 protocol='tcp')
    print('Checking for existing controller security group...')
    controller_security_group_exists = False
    controller_security_group_name = 'control'
    controller_security_group_id = ''
    for security_group in conn.ex_get_security_groups():
        if security_group.name == controller_security_group_name:
            controller_security_group_id = security_group.id
            controller_security_group_exists = True

    if controller_security_group_exists:
        print('Controller Security Group ' + controller_security_group_name + ' already exists. Skipping creation.')
    else:
        controller_security_group_result = conn.ex_create_security_group('control',
                                                                         'for services that run on a control node')
        controller_security_group_id = controller_security_group_result['group_id']
        conn.ex_authorize_security_group_ingress(controller_security_group_id, 22, 22, cidr_ips=['0.0.0.0/0'],
                                                 protocol='tcp')
        conn.ex_authorize_security_group_ingress(controller_security_group_id, 80, 80, cidr_ips=['0.0.0.0/0'],
                                                 protocol='tcp')
        conn.ex_authorize_security_group_ingress(controller_security_group_id, 5672, 5672,
                                                 group_pairs=[{'group_id': worker_security_group_id}],
                                                 protocol='tcp')
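        # port 5672 is the AMQP port of the RabbitMQ broker installed on the controller below; the
        # group_pairs rule above restricts access to members of the worker security group instead of
        # opening the port to the whole internet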
    # for security_group in conn.ex_list_security_groups():
    #     print(security_group)
    ###########################################################################
    #
    # create app-controller
    #
    ###########################################################################

    # https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh
    # is currently broken, hence the "rabbitmqctl" lines were added in the example
    # below, see also https://bugs.launchpad.net/faafo/+bug/1679710
    #
    # Thanks to Stefan Friedmann for finding this fix ;)

    userdata = '''#!/usr/bin/env bash
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
    -i messaging -i faafo -r api
rabbitmqctl add_user faafo guest
rabbitmqctl set_user_tags faafo administrator
rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*"
'''
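    # the user data above is executed by cloud-init on first boot: it installs the faafo API together
    # with the RabbitMQ message broker on this instance and creates the "faafo" RabbitMQ user that the
    # workers use to connect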
    print('Starting new app-controller instance and waiting until it is running...')
    instance_controller_1 = conn.create_node(name='app-controller',
                                             image=image,
                                             size=flavor,
                                             ex_keyname=keypair_name,
                                             ex_userdata=userdata,
                                             ex_security_groups=[controller_security_group_name])

    conn.wait_until_running(nodes=[instance_controller_1], timeout=120, ssh_interface='public_ips')
    ###########################################################################
    #
    # assign app-controller elastic ip
    #
    ###########################################################################

    # AWS offers elastic IPs, which have the same function as floating IPs in OpenStack. However,
    # elastic IPs cost money, and instances typically already have a public IP in AWS, what a luxury ;)
    print('Checking for unused Elastic IP...')
    unused_elastic_ip = None
    for elastic_ip in conn.ex_describe_all_addresses():
        if not elastic_ip.instance_id:
            unused_elastic_ip = elastic_ip
            break

    if not unused_elastic_ip:
        print('Allocating new Elastic IP')
        unused_elastic_ip = conn.ex_allocate_address()
    conn.ex_associate_address_with_node(instance_controller_1, unused_elastic_ip)
    print('Controller Application will be deployed to http://%s' % unused_elastic_ip.ip)
    ###########################################################################
    #
    # getting id and ip address of app-controller instance
    #
    ###########################################################################

    # instance should not have a public ip? floating ips are assigned later
    # instance_controller_1 = conn.ex_get_node_details(instance_controller_1.id)
    # if instance_controller_1.public_ips:
    #     ip_controller = instance_controller_1.public_ips[0]
    # else:
    ip_controller = instance_controller_1.private_ips[0]
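    # the workers reach the controller over its private address inside the VPC, so the private IP is
    # used below for the API endpoint and the AMQP URL in the worker's user data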
    ###########################################################################
    #
    # create app-worker-1
    #
    ###########################################################################

    userdata = '''#!/usr/bin/env bash
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
    -i faafo -r worker -e 'http://%(ip_controller)s' -m 'amqp://faafo:guest@%(ip_controller)s:5672/'
''' % {'ip_controller': ip_controller}
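    # in the install script, -e points the worker to the faafo API endpoint on the controller and -m
    # to the AMQP URL of the RabbitMQ broker (user "faafo", password "guest") created above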
    print('Starting new app-worker-1 instance and waiting until it is running...')
    instance_worker_1 = conn.create_node(name='app-worker-1',
                                         image=image,
                                         size=flavor,
                                         ex_keyname=keypair_name,
                                         ex_userdata=userdata,
                                         ex_security_groups=[worker_security_group_name])

    conn.wait_until_running(nodes=[instance_worker_1], timeout=120, ssh_interface='public_ips')
    ###########################################################################
    #
    # assign app-worker elastic ip
    #
    ###########################################################################

    # AWS offers elastic IPs, which have the same function as floating IPs in OpenStack. However,
    # elastic IPs cost money, and instances typically already have a public IP in AWS, what a luxury ;)
    print('Checking for unused Elastic IP...')
    unused_elastic_ip = None
    for elastic_ip in conn.ex_describe_all_addresses():
        if not elastic_ip.instance_id:
            unused_elastic_ip = elastic_ip
            break

    if not unused_elastic_ip:
        print('Allocating new Elastic IP')
        unused_elastic_ip = conn.ex_allocate_address()
    conn.ex_associate_address_with_node(instance_worker_1, unused_elastic_ip)
    print('The worker will be available for SSH at %s' % unused_elastic_ip.ip)

    print('You can use ssh to log in to the controller using your private key. After login, you can list available '
          'fractals using "faafo list". To request the generation of new fractals, you can use "faafo create". '
          'You can also see other options to use the faafo example cloud service using "faafo -h".')


if __name__ == '__main__':
    main()
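A quick way to verify the result afterwards (a minimal sketch, not part of this commit; it only uses libcloud calls already shown above and the same credentials/region):

# hypothetical check: list the faafo demo nodes and their public IPs
import getpass

from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider

conn = get_driver(Provider.EC2)(getpass.getpass('Enter your access_id:'),
                                getpass.getpass('Enter your secret_key:'),
                                region='eu-central-1')
for node in conn.list_nodes():
    if node.name in ['app-controller', 'app-worker-1']:
        print('%s: state=%s public_ips=%s' % (node.name, node.state, node.public_ips))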