Enable the Ceph Object Gateway (RADOSGW) to provide access to Ceph Cluster Storage via an Amazon S3 or OpenStack Swift compatible API.
This example is based on the environment like follows.
+--------------------+ | +----------------------+
| client.srv.local] |10.0.0.30 | 10.0.0.31| [www.srv.local] |
| Ceph Client +-----------+-----------+ RADOSGW |
| | | | |
+--------------------+ | +----------------------+
+----------------------------+----------------------------+
| | |
|10.0.0.51 |10.0.0.52 |10.0.0.53
+-----------+-----------+ +-----------+-----------+ +-----------+-----------+
| [node01.srv.local] | | [node02.srv.local] | | [node03.srv.local] |
| Object Storage +----+ Object Storage +----+ Object Storage |
| Monitor Daemon | | | | |
| Manager Daemon | | | | |
+-----------------------+ +-----------------------+ +-----------------------+
Table of Contents
[1] Transfer required files to RADOSGW Node and Configure it from Admin Node.
# transfer public key
root@node01:~# ssh-copy-id www
# install required packages
root@node01:~# ssh www "apt -y install radosgw"
root@node01:~# vi /etc/ceph/ceph.conf
# add to the end
# client.rgw.(Node Name)
[client.rgw.www]
# IP address of the Node
host = 10.0.0.31
# set listening port
rgw frontends = "civetweb port=8080"
# DNS name
rgw dns name = www.srv.local
# transfer files
root@node01:~# scp /etc/ceph/ceph.conf www:/etc/ceph/
ceph.conf 100% 374 342.6KB/s 00:00
root@node01:~# scp /etc/ceph/ceph.client.admin.keyring www:/etc/ceph/
ceph.client.admin.keyring 100% 151 73.7KB/s 00:00
# configure RADOSGW
root@node01:~# ssh www \
"mkdir -p /var/lib/ceph/radosgw/ceph-rgw.www; \
ceph auth get-or-create client.rgw.www osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/ceph-rgw.www/keyring; \
chown ceph. /etc/ceph/ceph.*; \
chown -R ceph. /var/lib/ceph/radosgw; \
systemctl enable --now ceph-radosgw@rgw.www"
# verify status
# it's OK if the following answer is shown
root@node01:~# curl www.srv.local:8080
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>
[2] On Object Gateway Node, Create a S3 compatible user who can authenticate to Object Gateway.
# for example, create [serverlocal] user
root@www:~# radosgw-admin user create --uid=serverlocal --display-name="Server Local" --email=admin@srv.local
{
"user_id": "serverlocal",
"display_name": "Server Local",
"email": "admin@srv.local",
"suspended": 0,
"max_buckets": 1000,
"subusers": [],
"keys": [
{
"user": "serverlocal",
"access_key": "SZLWEJH5OV6ZX83RS13K",
"secret_key": "oGxdGkDqjeKC29AIDYFoohpOfKpgUKFwfDBZbYmJ"
}
],
"swift_keys": [],
"caps": [],
"op_mask": "read, write, delete",
"default_placement": "",
"default_storage_class": "",
"placement_tags": [],
"bucket_quota": {
"enabled": false,
"check_on_raw": false,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"user_quota": {
"enabled": false,
"check_on_raw": false,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"temp_url_keys": [],
"type": "rgw",
"mfa_ids": []
}
# show user list
root@www:~# radosgw-admin user list
[
"serverlocal"
]
root@www:~# radosgw-admin user info --uid=serverlocal
{
"user_id": "serverlocal",
"display_name": "Server Local",
"email": "admin@srv.local",
"suspended": 0,
"max_buckets": 1000,
"subusers": [],
"keys": [
{
"user": "serverlocal",
"access_key": "SZLWEJH5OV6ZX83RS13K",
"secret_key": "oGxdGkDqjeKC29AIDYFoohpOfKpgUKFwfDBZbYmJ"
}
.....
.....
[3] Verify access via the S3 interface by creating a Python test script on a client computer.
root@client:~# apt -y install python3-boto3
root@client:~# vi s3_test.py
# s3_test.py — smoke-test the RADOSGW S3-compatible endpoint:
# create a bucket, list buckets, then delete the bucket.
import boto3
from botocore.config import Config

# Credentials of the S3 user created in section [2]
# (access-key / secret-key from [radosgw-admin user create])
session = boto3.session.Session(
    aws_access_key_id = 'SZLWEJH5OV6ZX83RS13K',
    aws_secret_access_key = 'oGxdGkDqjeKC29AIDYFoohpOfKpgUKFwfDBZbYmJ'
)
# Object Gateway endpoint URL (RADOSGW node, port set by [rgw frontends])
s3client = session.client(
    's3',
    endpoint_url = 'http://10.0.0.31:8080',
    config = Config()
)
# create [my-new-bucket]
bucket = s3client.create_bucket(Bucket = 'my-new-bucket')
# list Buckets — the new bucket should appear in the response
print(s3client.list_buckets())
# remove [my-new-bucket] (a bucket must be empty to be deleted)
s3client.delete_bucket(Bucket = 'my-new-bucket')
root@client:~# python3 s3_test.py
{'ResponseMetadata': {'RequestId': 'tx000000000000000000003-005f4c6554-5e80-default', 'HostId': '', 'HTTPStatusCode': 200, 'HTTPHeaders': {'transfer-encoding': 'chunked', 'x-amz-request-id': 'tx000000000000000000003-005f4c6554-5e80-default', 'content-type': 'application/xml', 'date': 'Mon, 31 Aug 2020 02:49:56 GMT'}, 'RetryAttempts': 0}, 'Buckets': [{'Name': 'my-new-bucket', 'CreationDate': datetime.datetime(2020, 8, 31, 2, 49, 51, 299000, tzinfo=tzutc())}], 'Owner': {'DisplayName': 'Server Local', 'ID': 'serverlocal'}}