💡 Provider configuration and basic Terraform usage are covered in the previous post.
VPCs & NAT
"main" VPC๋ฅผ ์์ฑํ๊ณ , ๋ ๊ฐ์ฉ์์ญ์ public subnet, private subnet์ ์์ฑํ๋ค.
# vpc.tf
# Internet VPC
resource "aws_vpc" "main" {
  cidr_block           = "10.0.0.0/16"
  instance_tenancy     = "default" # default: instances may share physical hardware
  enable_dns_support   = "true"    # enable DNS resolution
  enable_dns_hostnames = "true"    # assign internal DNS hostnames
  tags = {
    Name = "main"
  }
}
# Subnets
resource "aws_subnet" "main-public-1" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.1.0/24"
  map_public_ip_on_launch = "true"
  availability_zone       = "ap-northeast-2a"
  tags = {
    Name = "main-public-1"
  }
}

resource "aws_subnet" "main-public-2" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.2.0/24"
  map_public_ip_on_launch = "true"
  availability_zone       = "ap-northeast-2b"
  tags = {
    Name = "main-public-2"
  }
}

resource "aws_subnet" "main-private-1" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.4.0/24"
  map_public_ip_on_launch = "false"
  availability_zone       = "ap-northeast-2a"
  tags = {
    Name = "main-private-1"
  }
}

resource "aws_subnet" "main-private-2" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.5.0/24"
  map_public_ip_on_launch = "false"
  availability_zone       = "ap-northeast-2b"
  tags = {
    Name = "main-private-2"
  }
}
Create an internet gateway so the public subnets can reach the internet, and create a route table for them. Since the public subnets need outbound connectivity, configure the route table so that all traffic except internal addresses (the VPC CIDR range) is routed to the IGW, and associate this route table with every public subnet.
# vpc.tf
# Internet GW
resource "aws_internet_gateway" "main-gw" {
  vpc_id = aws_vpc.main.id
  tags = {
    Name = "main"
  }
}

# route tables
resource "aws_route_table" "main-public" {
  vpc_id = aws_vpc.main.id
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.main-gw.id
  }
  tags = {
    Name = "main-public-1"
  }
}

# associate the route table with the public subnets
resource "aws_route_table_association" "main-public-1-a" {
  subnet_id      = aws_subnet.main-public-1.id
  route_table_id = aws_route_table.main-public.id
}

resource "aws_route_table_association" "main-public-2-a" {
  subnet_id      = aws_subnet.main-public-2.id
  route_table_id = aws_route_table.main-public.id
}
Instances in the private subnets only get a private IP; they should be able to reach the internet, but not be reachable from it. Create a NAT gateway for this.
# nat.tf
# Elastic IP (static IP) for the NAT gateway
resource "aws_eip" "nat" {
  domain = "vpc"
}

# NAT gateway in a public subnet
resource "aws_nat_gateway" "nat-gw" {
  allocation_id = aws_eip.nat.id
  subnet_id     = aws_subnet.main-public-1.id
  # explicitly declaring the dependency on the IGW is recommended
  depends_on = [aws_internet_gateway.main-gw]
}
Create a route table for the private subnets. Traffic to anything other than internal addresses will be sent to the NAT gateway.
# nat.tf
# VPC setup for NAT
resource "aws_route_table" "main-private" {
  vpc_id = aws_vpc.main.id
  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.nat-gw.id
  }
  tags = {
    Name = "main-private-1"
  }
}

# associate the route table with the private subnets
resource "aws_route_table_association" "main-private-1-a" {
  subnet_id      = aws_subnet.main-private-1.id
  route_table_id = aws_route_table.main-private.id
}

resource "aws_route_table_association" "main-private-2-a" {
  subnet_id      = aws_subnet.main-private-2.id
  route_table_id = aws_route_table.main-private.id
}
Check the created VPC:
$ aws ec2 describe-vpcs --filter 'Name=isDefault,Values=false'
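To also check the subnets Terraform created, here is a quick sketch with the AWS CLI (assuming the VPC is tagged Name=main as above; VPC_ID is just a shell variable used for illustration):
$ VPC_ID=$(aws ec2 describe-vpcs --filters 'Name=tag:Name,Values=main' --query 'Vpcs[0].VpcId' --output text)
$ aws ec2 describe-subnets --filters "Name=vpc-id,Values=$VPC_ID" \
    --query 'Subnets[].[CidrBlock,AvailabilityZone,MapPublicIpOnLaunch]' --output table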
EC2
Before creating an instance in the new VPC, let's configure a security group.
Create an inbound rule that allows SSH access from my PC.
# securitygroup.tf
resource "aws_security_group" "allow-ssh" {
  vpc_id      = aws_vpc.main.id # the VPC created above
  name        = "allow-ssh"
  description = "security group that allows ssh and all egress traffic"
  egress {
    from_port   = 0    # all ports
    to_port     = 0
    protocol    = "-1" # all protocols
    cidr_blocks = ["0.0.0.0/0"]
  }
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["<my IP>"]
  }
  tags = {
    Name = "allow-ssh"
  }
}
Also add a key pair for instance access. Generate an SSH key and upload the public key to AWS, so instances are launched with that public key installed. (See the previous post for the details.)
# key.tf
resource "aws_key_pair" "mykeypair" {
  key_name   = "mykeypair"
  public_key = file("mykey.pub")
}
Now create an instance in the VPC, using the security group and SSH key above.
# instance.tf
resource "aws_instance" "example" {
  ami           = var.AMIS[var.AWS_REGION]
  instance_type = "t2.micro"
  # VPC subnet
  subnet_id = aws_subnet.main-public-1.id
  # security group
  vpc_security_group_ids = [aws_security_group.allow-ssh.id]
  # public SSH key
  key_name = aws_key_pair.mykeypair.key_name
}

output "ip" {
  value = aws_instance.example.public_ip
}
Once the instance is up, connect to it with the private key:
$ ssh -i mykey ubuntu@ec2-public-ip
EBS volume
Let's attach an EBS volume when creating the instance. Create the EBS volume in the same AZ as the instance, and use the aws_volume_attachment resource to attach it.
# instance.tf (in this example, appended to instance.tf)
# EBS volume
resource "aws_ebs_volume" "ebs-volume-1" {
  availability_zone = "ap-northeast-2a"
  size              = 20
  type              = "gp2"
  tags = {
    Name = "extra volume data"
  }
}

# attach the EBS volume to the instance
resource "aws_volume_attachment" "ebs-volume-1-attachment" {
  device_name                    = "/dev/xvdh"
  volume_id                      = aws_ebs_volume.ebs-volume-1.id
  instance_id                    = aws_instance.example.id
  stop_instance_before_detaching = true # make sure the instance is stopped before detaching the volume
}
Connect after the instance is created:
$ ssh -i mykey ubuntu@ec2-public-ip
Check the storage devices:
$ lsblk
NAME      MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS
...
xvda      202:0    0    8G  0 disk
├─xvda1   202:1    0  7.9G  0 part /
├─xvda14  202:14   0    4M  0 part
└─xvda15  202:15   0  106M  0 part /boot/efi
xvdh      202:112  0   20G  0 disk
- The 8 GB root volume is mounted at /.
- The attached device xvdh is not yet formatted or mounted.
Create a filesystem and mount it:
$ mkfs.ext4 /dev/xvdh
$ mkdir /data
$ mount /dev/xvdh /data
$ df -h
Filesystem Size Used Avail Use% Mounted on
/dev/root 7.6G 1.6G 6.0G 21% /
...
/dev/xvdh 20G 24K 19G 1% /data
To make the mount permanent, add it to /etc/fstab; it will then survive reboots.
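For example, append an entry like the following (the nofail option is an addition here, so boot does not hang if the volume happens to be missing):
$ echo '/dev/xvdh /data ext4 defaults,nofail 0 2' | sudo tee -a /etc/fstab
$ sudo mount -a   # verify the entry mounts cleanly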
However, if you destroy the instance and recreate it with Terraform, that entry is lost as well, because /etc/fstab lives on the root volume, which is destroyed with the instance.
-> With user_data, you can run a script at launch instead.
Userdata
In AWS, user data is used to customize an instance at launch:
- installing additional software
- preparing an instance to join a cluster
- running commands and scripts
- mounting volumes
User data is useful for mounting volumes at launch; it runs only when the instance is created, not on reboots.
Using a string
# instance.tf
resource "aws_instance" "example" {
  ami           = var.AMIS[var.AWS_REGION]
  instance_type = "t2.micro"
  # VPC subnet
  subnet_id = aws_subnet.main-public-1.id
  # security group
  vpc_security_group_ids = [aws_security_group.allow-ssh.id]
  # public SSH key
  key_name = aws_key_pair.mykeypair.key_name
  # user data
  user_data = <<-EOF
    #!/bin/bash
    echo "Hello, World!" > hello.txt
  EOF
}
- After connecting to the instance, check /hello.txt, for example:
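A quick check, reusing the key and public IP from the earlier SSH example:
$ ssh -i mykey ubuntu@ec2-public-ip
$ cat /hello.txt
Hello, World!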
Using a file
Register a script file as user_data and have it run at launch.
# install_docker.sh
#!/bin/bash
sudo apt-get update
sudo apt-get -y install \
    ca-certificates \
    curl \
    gnupg \
    lsb-release \
    unzip \
    jq

# Set up the repository
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

# Install Docker Engine
sudo apt-get update
sudo apt-get -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
sudo usermod -aG docker $USER
# instance.tf
user_data = "${file("install_docker.sh")}"
Example using template_file:
# instance.tf
data "template_file" "user_data" {
  template = file("install_docker.sh")
  # vars is not actually used here; it is kept as an example of passing variables into the template.
  vars = {
    package = "temp"
    command = "temp"
  }
}

resource "aws_instance" "example" {
  ami           = "ami-12345678"
  instance_type = "t2.micro"
  user_data     = data.template_file.user_data.rendered
}
Let's automate instance initialization with cloud-init.
Define the first-boot configuration in init.cfg. The file must start with #cloud-config.
# scripts/init.cfg
#cloud-config
repo_update: true
repo_upgrade: all

packages:
  - lvm2

output:
  all: '| tee -a /var/log/cloud-init-output.log'
- Installs lvm2, the Linux logical volume manager.
Write a bash script that creates a filesystem on the volume and mounts it:
# scripts/volumes.sh
#!/bin/bash
set -ex

vgchange -ay

DEVICE_FS=`blkid -o value -s TYPE ${DEVICE} || echo ""`
if [ "`echo -n $DEVICE_FS`" == "" ] ; then
  # wait for the device to be attached
  DEVICENAME=`echo "${DEVICE}" | awk -F '/' '{print $3}'`
  DEVICEEXISTS=''
  while [[ -z $DEVICEEXISTS ]]; do
    echo "checking $DEVICENAME"
    DEVICEEXISTS=`lsblk |grep "$DEVICENAME" |wc -l`
    if [[ $DEVICEEXISTS != "1" ]]; then
      sleep 15
    fi
  done
  # make sure the device file in /dev/ exists
  count=0
  until [[ -e ${DEVICE} || "$count" == "60" ]]; do
    sleep 5
    count=$(expr $count + 1)
  done
  pvcreate ${DEVICE}
  vgcreate data ${DEVICE}
  lvcreate --name volume1 -l 100%FREE data
  mkfs.ext4 /dev/data/volume1
fi
mkdir -p /data
echo '/dev/data/volume1 /data ext4 defaults 0 0' >> /etc/fstab
mount /data

# install docker
curl https://get.docker.com | bash
# cloudinit.tf
data "template_file" "init-script" {
  template = "${file("scripts/init.cfg")}"
}

data "template_file" "shell-script" {
  template = "${file("scripts/volumes.sh")}"
  vars = {
    DEVICE = "${var.INSTANCE_DEVICE_NAME}"
  }
}

data "template_cloudinit_config" "cloudinit-example" {
  gzip          = false
  base64_encode = false

  part {
    filename     = "init.cfg"
    content_type = "text/cloud-config"
    content      = "${data.template_file.init-script.rendered}"
  }

  part {
    content_type = "text/x-shellscript"
    content      = "${data.template_file.shell-script.rendered}"
  }
}
- The template_file data source renders a template from a template string, typically loaded from an external file. Variables can be passed into the template through vars.
- The $DEVICE value is passed in via template_file (var.INSTANCE_DEVICE_NAME); an assumed definition of that variable is sketched below.
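var.INSTANCE_DEVICE_NAME is not defined elsewhere in this post; a minimal assumed definition, matching the /dev/xvdh device used earlier, could look like this:
# vars.tf (assumed)
variable "INSTANCE_DEVICE_NAME" {
  default = "/dev/xvdh"
}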
# instance.tf
resource "aws_instance" "example" {
  ami           = var.AMIS[var.AWS_REGION]
  instance_type = "t2.micro"
  # VPC subnet
  subnet_id = aws_subnet.main-public-1.id
  # security group
  vpc_security_group_ids = [aws_security_group.allow-ssh.id]
  # public SSH key
  key_name  = aws_key_pair.mykeypair.key_name
  user_data = data.template_cloudinit_config.cloudinit-example.rendered
}

resource "aws_ebs_volume" "ebs-volume-1" {
  availability_zone = "ap-northeast-2a"
  size              = 20
  type              = "gp2"
  tags = {
    Name = "extra volume data"
  }
}

# attach the EBS volume to the instance
resource "aws_volume_attachment" "ebs-volume-1-attachment" {
  device_name                    = var.INSTANCE_DEVICE_NAME
  volume_id                      = aws_ebs_volume.ebs-volume-1.id
  instance_id                    = aws_instance.example.id
  stop_instance_before_detaching = true # make sure the instance is stopped before detaching the volume
}

output "ip" {
  value = aws_instance.example.public_ip
}
- The rendered template_cloudinit_config is passed as user_data.
The rest is the same as before. After terraform apply, the instance will mount the volume and install Docker through the scripts during initialization.
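One way to verify after apply, assuming the instance is reachable as before:
$ ssh -i mykey ubuntu@ec2-public-ip
$ df -h /data                               # the LVM volume should be mounted here
$ docker --version                          # installed by the shell-script part
$ sudo cat /var/log/cloud-init-output.log   # log location configured in init.cfg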
Static IP, EIP, Route53
Static private IP
Normally a private IP is assigned automatically to an EC2 instance from the subnet's range. You can also specify the private IP explicitly.
resource "aws_instance" "example" {
ami = var.AMIS[var.AWS_REGION]
instance_type = "t2.micro"
subnet_id = aws_subnet.main-public-1.id
private_ip = "10.0.1.4" # main-public-1 subnet์ ๋ฒ์ ๋ด์์ ์ง์
}
EIP
An Elastic IP is a static public IP that can be attached to an instance.
resource "aws_eip" "example-eip" {
instance = "${aws_instance.example.id}"
vpc = true
}
Output:
output "ip" {
value = aws_eip.example-eip.public_ip
}
Route53
With Route53 you can host domain names on AWS. First, the domain name has to be registered, either through AWS or through an accredited registrar.
Route53 lets you create a hosted zone such as example.com and add DNS records such as xx.example.com.
resource "aws_route53_zone" "example-com" {
name = "example.com"
}
resource "aws_route53_record" "xx-record" {
zone_id = "${aws_route53_zone.example-com.zone_id}"
name = "xx.example.com"
type = "A" # ํธ์คํธ ์ด๋ฆ์ ip๋ก ๊ฒฐ์ ํ๊ธฐ ์ํจ
ttl = "300" # ์บ์์ ๋จธ๋ฌด๋ฅด๋ ์๊ฐ(์ด)
records = ["${aws_eip.example-eip.public_ip}"] # public ip์ง์
}
# ํน์ ๋๋ฉ์ธ์ ๋ค์์๋ฒ ์ถ๋ ฅ
output "nameservers" {
value = aws_route53_zone.example-com.name_servers
}
Check the host record
You can query the record against one of the nameservers from the output:
# host {record} {name server}
$ host xx.example.com ns-1111.awsdns-11.org
RDS
AWS RDS (Relational Database Service) is Amazon's managed database solution. It provides several useful features:
- easy replication setup (high availability; easily promote the master between two replicated instances)
- automated snapshots (backups)
- automated security updates (instance upgrades in a maintenance window)
- easy instance replacement (vertical scaling)
Creating an RDS instance takes the following steps:
1. Create the subnet group the RDS instance will be placed in
2. Create a parameter group
- specify parameters for database configuration
3. Create a security group that allows traffic into the RDS instance
4. Create the RDS instance
Assuming the example instance created earlier (in the public subnet) exists, we will create a MariaDB instance that is reachable from it.
*AWS supports several RDS engines; here we use MariaDB.
Create a subnet group
The subnet group the RDS instance will be placed in:
# rds.tf
resource "aws_db_subnet_group" "mariadb-subnet" {
  name        = "mariadb-subnet"
  description = "RDS subnet group"
  subnet_ids  = [aws_subnet.main-private-1.id, aws_subnet.main-private-2.id]
}
Create a parameter group
Specify MariaDB parameters. (Since you have no OS access to the RDS instance, this is the only way to change these database settings.)
# rds.tf
resource "aws_db_parameter_group" "mariadb-parameters" {
  name        = "mariadb-parameters"
  family      = "mariadb10.4" # MariaDB version
  description = "MariaDB parameter group"
  parameter {
    name  = "max_allowed_packet"
    value = "16777216"
  }
}
Create security groups
# securitygroup.tf
# security group for SSH access
resource "aws_security_group" "example-instance" {
  vpc_id      = aws_vpc.main.id
  name        = "allow-ssh"
  description = "security group that allows ssh and all egress traffic"
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  tags = {
    Name = "example-instance"
  }
}

# security group for the MariaDB instance
resource "aws_security_group" "allow-mariadb" {
  vpc_id      = aws_vpc.main.id
  name        = "allow-mariadb"
  description = "allow-mariadb"
  ingress {
    from_port       = 3306
    to_port         = 3306
    protocol        = "tcp"
    security_groups = [aws_security_group.example-instance.id] # allow access from the example instance
  }
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
    self        = true
  }
  tags = {
    Name = "allow-mariadb"
  }
}
Create the RDS instance
# rds.tf
resource "aws_db_instance" "mariadb" {
  allocated_storage       = 100 # 100 GB of storage
  engine                  = "mariadb"
  engine_version          = "10.4"
  instance_class          = "db.t2.small" # use db.t2.micro on the free tier
  identifier              = "mariadb"
  db_name                 = "mariadb"
  username                = "root"           # username
  password                = var.RDS_PASSWORD # password
  db_subnet_group_name    = aws_db_subnet_group.mariadb-subnet.name
  parameter_group_name    = aws_db_parameter_group.mariadb-parameters.name
  multi_az                = "false" # if true, two synchronized instances provide high availability
  vpc_security_group_ids  = [aws_security_group.allow-mariadb.id]
  storage_type            = "gp2"
  backup_retention_period = 30 # backup retention period (days)
  availability_zone       = aws_subnet.main-private-1.availability_zone # AZ
  skip_final_snapshot     = true # skip the final snapshot on terraform destroy
  tags = {
    Name = "mariadb-instance"
  }
}
# output.tf
output "rds" {
  value = aws_db_instance.mariadb.endpoint
}

# vars.tf
variable "RDS_PASSWORD" {
}
*The variable can be passed at apply time:
$ terraform apply -var RDS_PASSWORD=mypw
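Alternatively, Terraform also reads variables from environment variables prefixed with TF_VAR_, which keeps the password off the command line:
$ export TF_VAR_RDS_PASSWORD=mypw
$ terraform apply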
Connect after creation:
$ ssh -i mykey ubuntu@example-instance-ip
Install the mysql client and connect:
$ sudo apt-get install mysql-client
$ mysql -u root -h <mariadb endpoint from the output> -p   # enter the RDS password when prompted
mysql > show databases;
IAM
IAM is the service for managing access to AWS resources. You can create groups, users, and roles.
- Users can belong to groups; for example, users in an administrators group can be granted admin permissions.
- Users can authenticate with a login/password, or with an access key and secret key (API keys).
Roles can grant temporary permissions to users and services.
For example, create a role called mybucket-access, attach it to an EC2 instance at boot, and give the role permission to read and write items in the bucket mybucket. The instance can then act with the mybucket-access role without any user credentials being configured.
Instead of going through the aws-cli, an application can also use the role directly: it uses the AWS SDK, and the API calls it makes to S3 are signed with the role's temporary credentials.
Admin permissions
Create an administrators group and add a user to it.
# iam.tf
# create the administrators group
resource "aws_iam_group" "administrators" {
  name = "administrators"
}

# attach the admin policy to the administrators group
resource "aws_iam_policy_attachment" "administrators-attach" {
  name       = "administrators-attach"
  groups     = [aws_iam_group.administrators.name]
  policy_arn = "arn:aws:iam::aws:policy/AdministratorAccess" # AWS managed policy
}

# create a user
resource "aws_iam_user" "admin1" {
  name = "admin1"
}

# add the user to the administrators group
resource "aws_iam_group_membership" "administrators-users" {
  name = "administrators-users"
  users = [
    aws_iam_user.admin1.name
  ]
  group = aws_iam_group.administrators.name
}
Now let's create a role to attach to an EC2 instance.
Create the IAM role:
# iam.tf
resource "aws_iam_role" "s3-mybucket-role" {
  name               = "s3-mybucket-role"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Effect": "Allow",
      "Sid": ""
    }
  ]
}
EOF
}
Create an instance profile
Note: if you create an EC2 role through the console, the console automatically creates an instance profile with the same name as the role. When you later launch an instance with an IAM role, or attach a role to an instance, the list shown in the console is actually the list of instance profile names.
# iam.tf
resource "aws_iam_instance_profile" "s3-mybucket-role-instanceprofile" {
  name = "s3-mybucket-role"
  role = aws_iam_role.s3-mybucket-role.name
}
Attach the IAM instance profile to the instance:
resource "aws_instance" "example" {
ami = var.AMIS[var.AWS_REGION]
instance_type = "t2.micro"
# the VPC subnet
subnet_id = aws_subnet.main-public-1.id
# the security group
vpc_security_group_ids = [aws_security_group.example-instance.id]
# the public SSH key
key_name = aws_key_pair.mykeypair.key_name
# role:
iam_instance_profile = aws_iam_instance_profile.s3-mybucket-role-instanceprofile.name
}
Create a bucket
# s3.tf
resource "aws_s3_bucket" "b" {
  bucket = "mybucket-c29df1"
  tags = {
    Name = "mybucket-c29df1"
  }
}
Create a policy for the bucket
Create a policy that allows actions on the bucket:
# iam.tf
resource "aws_iam_role_policy" "s3-mybucket-role-policy" {
  name = "s3-mybucket-role-policy"
  role = aws_iam_role.s3-mybucket-role.id # references the role created above
  # allow all S3 actions on the bucket created above
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:*"
      ],
      "Resource": [
        "arn:aws:s3:::mybucket-c29df1",
        "arn:aws:s3:::mybucket-c29df1/*"
      ]
    }
  ]
}
EOF
}
Run terraform apply and connect to the new instance that has s3-mybucket-role attached. (*The key, security group, and VPC settings are the same as in the earlier examples.)
$ ssh -i mykey ubuntu@example-instance-ip
Install the AWS CLI:
$ sudo -s
$ apt-get install -y python-pip python-dev
$ pip install awscli
Create a test file and upload it to the bucket:
$ echo "test" > test.txt
$ aws s3 cp test.txt s3://mybucket-c29df1/test.txt
The upload works without logging in and without configuring any AWS credentials; the bucket is accessible from the instance.
The temporary credentials are stored in the AWS instance metadata:
$ curl http://169.254.169.254/latest/meta-data/iam/security-credentials
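Appending the role name returns the temporary access key, secret key, and token that the CLI and SDKs pick up automatically. A quick sketch:
$ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3-mybucket-role
$ aws sts get-caller-identity   # should show the assumed role, without any configured credentials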
Autoscaling
ELB
ALB