Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 23 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# ceph-bench
Tool for benchmarking Ceph clusters.

# Install prerequisites

### CentOS
`yum install python-configparser python-monotonic`
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

))) он во втором питоне есть искаропки, просто подругому называется. в итоге я зарефакторил так что выпилил его использование совсем. И да, напиши для каких версий центоса это. равно как и убунту.

### Debian/Ubuntu
* for Python2 `apt install python-configparser python-monotonic`
* for Python3 `apt install python3-rados`

# Prepare

```
cd ceph-bench
ceph auth export client.admin -o keyring.conf
ceph osd pool create single 128 128
ceph osd pool set single size 1
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

сначала креате, потом сет сайз.

и креате нифига не 1 1, а 128 128 или сколько там нужно.

```

# Run
* for Python2 `python2 main.py`
* for Python3 `python3 main.py`
56 changes: 30 additions & 26 deletions main.py
Original file line number Diff line number Diff line change
@@ -1,43 +1,50 @@
#!/usr/bin/python3
import sys
import json
import argparse
import logging
import os
import time
import rados
import configparser
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ну ведь не тестировал же. во втором питоне конфигпасер подругому называется.

В общем, про питон2 я уже пофиксил. про индентацию и пробелы по пеп8 -- пофикшу сам попозже.

from itertools import cycle, count
from pprint import pprint
from itertools import cycle

import ceph_argparse

log = logging.getLogger(__name__)


def do_bench(secs, name, ioctx, data):
    """Benchmark writes to a single RADOS object for roughly *secs* seconds.

    Repeatedly writes the next payload from *data* to the object *name*
    and counts completed writes.  The benchmark object is removed
    afterwards on a best-effort basis.

    :param secs: benchmark duration in seconds
    :param name: RADOS object name to write to
    :param ioctx: rados IO context providing ``write`` and ``remove_object``
    :param data: iterator yielding ``bytes`` payloads (e.g. ``itertools.cycle``)
    :returns: tuple ``(elapsed_seconds, completed_ops)``
    :raises: propagates any write error after attempting object cleanup
    """
    # time.monotonic (Python 3.3+) is immune to wall-clock adjustments,
    # so elapsed-time math stays correct even if the system clock jumps.
    # Timing is sampled once per write, outside any try/except, to keep
    # the hot loop free of per-iteration imports or exception handling.
    b = a = time.monotonic()
    stop = a + secs
    ops = 0
    try:
        while b <= stop:
            ioctx.write(name, next(data))
            b = time.monotonic()
            ops += 1
    finally:
        # Always try to clean up the benchmark object, even when a write
        # raised; a leftover object is logged but not treated as fatal.
        try:
            log.debug('Removing object %s.', name)
            ioctx.remove_object(name)
        except Exception as e:
            log.error('Failed to remove object %s: %r', name, e)
    return b - a, ops


def _cmd(cluster, cmd, **kwargs):
target = ceph_argparse.find_cmd_target(cmd.split())

argdict = {
'prefix': cmd,
'target': target,
'format': 'json',
'prefix': cmd,
'target': target,
'format': 'json',
}
argdict.update(kwargs)
log.debug('Calling ceph: %r', argdict)
Expand All @@ -52,13 +59,12 @@ def _cmd(cluster, cmd, **kwargs):
return json.loads(outbuf.decode('utf-8'))



def main():
logging.basicConfig(level=logging.INFO)
conf = {'keyring': 'keyring.conf'}
pool = 'single'
MODE = 'HOST' # HOST or OSD
secs = 10 # secs to benchmark
secs = 10 # secs to benchmark
bytesperobj = 4 * 1024 * 1024
bigdata = cycle([os.urandom(bytesperobj), os.urandom(bytesperobj)])

Expand All @@ -74,38 +80,35 @@ def main():
log.info('Attaching to CEPH cluster. pool=%s, rados_id=%s', pool, rados_id)
with rados.Rados(conffile='/etc/ceph/ceph.conf', rados_id=rados_id, conf=conf) as cluster:
log.info('Getting map osd -> host.')
#info = json.loads(subprocess.check_output(['ceph', 'osd', 'tree', '--format=json']).decode('utf-8'))
# info = json.loads(subprocess.check_output(['ceph', 'osd', 'tree', '--format=json']).decode('utf-8'))
info = _cmd(cluster, 'osd tree')
osd2host = {}
for i in info['nodes']:
if i ['type'] != 'host':
if i['type'] != 'host':
continue
for j in i['children']:
osd2host[j] = i['name']
pool_id = cluster.pool_lookup(pool)


log.info('Getting pg => acting set.')
#info = json.loads(subprocess.check_output(['ceph', '--format=json', 'pg', 'dump', 'pgs_brief']).decode('utf-8'))
# info = json.loads(subprocess.check_output(['ceph', '--format=json', 'pg', 'dump', 'pgs_brief']).decode('utf-8'))
info = _cmd(cluster, 'pg dump', dumpcontents=['pgs_brief'])


pgid2acting = {i['pgid']:tuple(i['acting']) for i in info if i['pgid'].startswith(str(pool_id))}
pgid2acting = {i['pgid']: tuple(i['acting']) for i in info if i['pgid'].startswith(str(pool_id))}
if MODE == 'HOST':
bench_items = set(tuple(osd2host[i] for i in osds) for osds in pgid2acting.values())
else:
bench_items = set(pgid2acting.values())


log.info('Figuring out object names for %d %s combinations.', len(bench_items), MODE)
obj2info = dict()
cnt = 0
totlen=len(bench_items)
totlen = len(bench_items)
while bench_items:
cnt = cnt + 1
name = 'bench_%d' % cnt

#info = json.loads(subprocess.check_output(['ceph', '-f', 'json', 'osd', 'map', pool, name]).decode('utf-8'))
# info = json.loads(subprocess.check_output(['ceph', '-f', 'json', 'osd', 'map', pool, name]).decode('utf-8'))
info = _cmd(cluster, 'osd map', object=name, pool=pool)

acting = tuple(info['acting'])
Expand All @@ -120,11 +123,11 @@ def main():
continue

bench_items.remove(bench_item)
log.info('Found %d/%d', totlen-len(bench_items), totlen)
log.info('Found %d/%d', totlen - len(bench_items), totlen)

obj2info[name] = (hosts, acting)

obj2info=dict(sorted(obj2info.items(), key=lambda i: i[1]))
obj2info = dict(sorted(obj2info.items(), key=lambda i: i[1]))

log.debug('Opening IO context for pool %s.', pool)
with cluster.open_ioctx(pool) as ioctx:
Expand All @@ -133,15 +136,16 @@ def main():
log.debug('Benchmarking IOPS on OSD %r (%r)', list(acting), ','.join(hosts))
delay, ops = do_bench(secs, name, ioctx, cycle([b'q', b'w']))
iops = ops / delay
lat = delay / ops # in sec
log.debug('Benchmarking Linear write on OSD %r (%r) blocksize=%d MiB', list(acting), ','.join(hosts), bytesperobj//(1024*1024))
lat = delay / ops # in sec
log.debug('Benchmarking Linear write on OSD %r (%r) blocksize=%d MiB', list(acting), ','.join(hosts),
bytesperobj // (1024 * 1024))
delay, ops = do_bench(secs, name, ioctx, bigdata)
bsec = ops * bytesperobj / delay

log.info(
'OSD %r (%r): %2.2f IOPS, lat=%.4f ms. %2.2f MB/sec (%2.2f Mbit/s).',
list(acting),
','.join(hosts),
','.join(hosts),
iops,
lat * 1000,
bsec / 1000000,
Expand Down