From 1cdb3ecffc679f29e9d77dc0d5ff33bdb1192b49 Mon Sep 17 00:00:00 2001
From: Gary Lowell
Date: Tue, 23 Jul 2013 22:43:59 -0700
Subject: [PATCH 01/17] configure.ac: Remove -rc suffix from the configure
 version number.

Remove the rc suffix since RPM complains about it.  For rc release
builds, the "rc" in the git describe string is sufficient for everything
but RPM.  For rc release builds (i.e. not gitbuilder), add a flag to the
spec file.

Signed-off-by: Gary Lowell
---
 configure.ac | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configure.ac b/configure.ac
index 09fd81d722dca..db0a03134e677 100644
--- a/configure.ac
+++ b/configure.ac
@@ -8,7 +8,7 @@ AC_PREREQ(2.59)
 # VERSION define is not used by the code.  It gets a version string
 # from 'git describe'; see src/ceph_ver.[ch]
 
-AC_INIT([ceph], [0.67-rc1], [ceph-devel@vger.kernel.org])
+AC_INIT([ceph], [0.67], [ceph-devel@vger.kernel.org])
 
 # Create release string.  Used with VERSION for RPMs.
 RPM_RELEASE=0

From 76cd7ac1c2094b34ad36bea89b2246fa90eb2f6d Mon Sep 17 00:00:00 2001
From: Joao Eduardo Luis
Date: Wed, 24 Jul 2013 12:00:28 +0100
Subject: [PATCH 02/17] mon: OSDMonitor: fix a bug introduced on 97462a32

Fixes: #5737
Backport: cuttlefish

Signed-off-by: Joao Eduardo Luis
Reviewed-by: Sage Weil
---
 src/mon/OSDMonitor.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index b02a365125278..f2e51e4068512 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -141,7 +141,7 @@ void OSDMonitor::update_from_paxos(bool *need_bootstrap)
   latest_full = 0;
   for (version_t v = lc; v >= fc; v--) {
-    string full_key = "full_" + stringify(latest_full);
+    string full_key = "full_" + stringify(v);
     if (mon->store->exists(get_service_name(), full_key)) {
       dout(10) << __func__ << " found latest full map v " << v << dendl;
       latest_full = v;

From a7a7d3fc8a2ba4a30ef136a32f2903d157b3e19a Mon Sep 17 00:00:00 2001
From: Joao Eduardo Luis
Date: Mon, 17 Jun 2013 14:43:36 +0100
Subject: [PATCH 03/17] test: test_store_tool: global init before using
 LevelDBStore

Fixes a segfault

Signed-off-by: Joao Eduardo Luis
Reviewed-by: Sage Weil
---
 .../test_store_tool/test_store_tool.cc       | 26 ++++++++++++++++---
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/src/test/ObjectMap/test_store_tool/test_store_tool.cc b/src/test/ObjectMap/test_store_tool/test_store_tool.cc
index 4ab6289288042..ace91220df66f 100644
--- a/src/test/ObjectMap/test_store_tool/test_store_tool.cc
+++ b/src/test/ObjectMap/test_store_tool/test_store_tool.cc
@@ -19,6 +19,12 @@
 
 #include "os/LevelDBStore.h"
 
+#include "common/ceph_argparse.h"
+#include "global/global_init.h"
+#include "common/errno.h"
+#include "common/safe_io.h"
+#include "common/config.h"
+
 using namespace std;
 
 class StoreTool
@@ -98,15 +104,27 @@ void usage(const char *pname)
     << std::endl;
 }
 
-int main(int argc, char *argv[])
+int main(int argc, const char *argv[])
 {
-  if (argc < 3) {
+  vector<const char*> args;
+  argv_to_vec(argc, argv, args);
+  env_to_vec(args);
+
+  global_init(
+      NULL, args,
+      CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
+  common_init_finish(g_ceph_context);
+
+
+  if (args.size() < 2) {
     usage(argv[0]);
     return 1;
   }
 
-  string path(argv[1]);
-  string cmd(argv[2]);
+  string path(args[0]);
+  string cmd(args[1]);
+
+  std::cout << "path: " << path << " cmd " << cmd << std::endl;
 
   StoreTool st(path);

From c2131d4047156aa2964581c9dbd93846382a07e7 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Wed, 24 Jul 2013 11:55:42 -0700
Subject: [PATCH 04/17]
 mon/OSDMonitor: search for latest full osdmap if record version is missing

In 97462a3213e5e15812c79afc0f54d697b6c498b1 we tried to search for a
recent full osdmap but were looking at the wrong key.  If full_0 was
present we could record that the latest full map was last_committed even
though it wasn't present.  This is fixed in 76cd7ac1c, but we need to
compensate for when get_version_latest_full() gives us a bad version
number by repeating the search.

Fixes: #5737
Signed-off-by: Sage Weil
Reviewed-by: Joao Eduardo Luis
---
 src/mon/OSDMonitor.cc | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index f2e51e4068512..c6db052a591fe 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -126,6 +126,16 @@ void OSDMonitor::update_from_paxos(bool *need_bootstrap)
   if (latest_full == 0 && get_first_committed() > 1)
     latest_full = get_first_committed();
 
+  if (latest_full > 0) {
+    // make sure we can really believe get_version_latest_full(); see
+    // 76cd7ac1c2094b34ad36bea89b2246fa90eb2f6d
+    bufferlist test;
+    get_version_full(latest_full, test);
+    if (test.length() == 0) {
+      dout(10) << __func__ << " ignoring recorded latest_full as it is missing; fallback to search" << dendl;
+      latest_full = 0;
+    }
+  }
   if (get_first_committed() > 1 &&
       latest_full < get_first_committed()) {
     /* a bug introduced in 7fb3804fb860dcd0340dd3f7c39eec4315f8e4b6 would lead

From 92855064b820f4dc02d11ba8d0cd0199b44dc49c Mon Sep 17 00:00:00 2001
From: Dan Mick
Date: Thu, 18 Jul 2013 16:33:16 -0700
Subject: [PATCH 05/17] ceph.in/ceph_argparse.py: move find_cmd_target() to
 ceph_argparse.py

Signed-off-by: Dan Mick
---
 src/ceph.in                 | 28 ----------------------------
 src/pybind/ceph_argparse.py | 28 ++++++++++++++++++++++++++++
 2 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/src/ceph.in b/src/ceph.in
index dbb7fb5a8cdc5..27d96f46355ec 100755
--- a/src/ceph.in
+++ b/src/ceph.in
@@ -389,34 +389,6 @@ PGID_MATCH = { \
     'return_key': 'pgid', \
 }
 
-def find_cmd_target(childargs):
-    """
-    Using a minimal validation, figure out whether the command
-    should be sent to a monitor or an osd.  We do this before even
-    asking for the 'real' set of command signatures, so we can ask the
-    right daemon.
-    Returns ('osd', osdid), ('pg', pgid), or ('mon', '')
-    """
-    sig = parse_funcsig(['tell', {'name':'target','type':'CephName'}])
-    try:
-        valid_dict = validate(childargs, sig, partial=True);
-        if len(valid_dict) == 2:
-            name = CephName()
-            name.valid(valid_dict['target'])
-            return name.nametype, name.nameid
-    except ArgumentError:
-        pass
-
-    sig = parse_funcsig(['pg', {'name':'pgid','type':'CephPgid'}])
-    try:
-        valid_dict = validate(childargs, sig, partial=True);
-        if len(valid_dict) == 2:
-            return 'pg', valid_dict['pgid']
-    except ArgumentError:
-        pass
-
-    return 'mon', ''
-
 def complete(sigdict, args, target):
     """
     Command completion.  Match as much of [args] as possible,
diff --git a/src/pybind/ceph_argparse.py b/src/pybind/ceph_argparse.py
index 72b36dd50a536..907d2ec2b33f8 100644
--- a/src/pybind/ceph_argparse.py
+++ b/src/pybind/ceph_argparse.py
@@ -887,6 +887,34 @@ def validate_command(parsed_args, sigdict, args, verbose=False):
 
     return valid_dict
 
+def find_cmd_target(childargs):
+    """
+    Using a minimal validation, figure out whether the command
+    should be sent to a monitor or an osd.  We do this before even
+    asking for the 'real' set of command signatures, so we can ask the
+    right daemon.
+    Returns ('osd', osdid), ('pg', pgid), or ('mon', '')
+    """
+    sig = parse_funcsig(['tell', {'name':'target','type':'CephName'}])
+    try:
+        valid_dict = validate(childargs, sig, partial=True);
+        if len(valid_dict) == 2:
+            name = CephName()
+            name.valid(valid_dict['target'])
+            return name.nametype, name.nameid
+    except ArgumentError:
+        pass
+
+    sig = parse_funcsig(['pg', {'name':'pgid','type':'CephPgid'}])
+    try:
+        valid_dict = validate(childargs, sig, partial=True);
+        if len(valid_dict) == 2:
+            return 'pg', valid_dict['pgid']
+    except ArgumentError:
+        pass
+
+    return 'mon', ''
+
 def send_command(cluster, target=('mon', ''), cmd=[], inbuf='', timeout=0,
                  verbose=False):
     """

From e83942086c394fcae2ebdb98cae78defeccbd428 Mon Sep 17 00:00:00 2001
From: Dan Mick
Date: Tue, 23 Jul 2013 00:16:40 -0700
Subject: [PATCH 06/17] MonCommands.h: osd pool delete "rw" perms, osd pool
 set no longer exp

Signed-off-by: Dan Mick
---
 src/mon/MonCommands.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/mon/MonCommands.h b/src/mon/MonCommands.h
index 5b950ca1aefe5..0980893bf9b27 100644
--- a/src/mon/MonCommands.h
+++ b/src/mon/MonCommands.h
@@ -482,7 +482,7 @@ COMMAND("osd pool delete " \
 	"name=pool2,type=CephPoolname " \
 	"name=sure,type=CephChoices,strings=--yes-i-really-really-mean-it", \
 	"delete pool (say pool twice, add --yes-i-really-really-mean-it)", \
-	"osd", "r", "cli,rest")
+	"osd", "rw", "cli,rest")
 COMMAND("osd pool rename " \
 	"name=srcpool,type=CephPoolname " \
 	"name=destpool,type=CephPoolname", \
@@ -494,8 +494,7 @@ COMMAND("osd pool get " \
 COMMAND("osd pool set " \
 	"name=pool,type=CephPoolname " \
 	"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset " \
-	"name=val,type=CephInt " \
-	"name=sure,type=CephChoices,strings=--allow-experimental-feature,req=false", \
+	"name=val,type=CephInt", \
 	"set pool parameter <var> to <val>", "osd", "rw", "cli,rest")
 // 'val' is a CephString because it can include a unit.  Perhaps
 // there should be a Python type for validation/conversion of strings

From 1579c344fed01ee48167786e841f093f21143770 Mon Sep 17 00:00:00 2001
From: Dan Mick
Date: Tue, 23 Jul 2013 00:15:32 -0700
Subject: [PATCH 07/17] rest/test.py: osd lspools should be a 'GET'

Signed-off-by: Dan Mick
---
 qa/workunits/rest/test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/qa/workunits/rest/test.py b/qa/workunits/rest/test.py
index fc7e82c9b6161..bcad4b03447a1 100755
--- a/qa/workunits/rest/test.py
+++ b/qa/workunits/rest/test.py
@@ -297,10 +297,10 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None):
     expect('osd/pool/rmsnap?pool=data&snap=datasnap', 'PUT', 200, '')
 
     expect('osd/pool/create?pool=data2&pg_num=10', 'PUT', 200, '')
-    r = expect('osd/lspools', 'PUT', 200, 'json', JSONHDR)
+    r = expect('osd/lspools', 'GET', 200, 'json', JSONHDR)
     assert([p for p in r.json['output'] if p['poolname'] == 'data2'])
     expect('osd/pool/rename?srcpool=data2&destpool=data3', 'PUT', 200, '')
-    r = expect('osd/lspools', 'PUT', 200, 'json', JSONHDR)
+    r = expect('osd/lspools', 'GET', 200, 'json', JSONHDR)
     assert([p for p in r.json['output'] if p['poolname'] == 'data3'])
     expect('osd/pool/delete?pool=data3', 'PUT', 400, '')
     expect('osd/pool/delete?pool=data3&pool2=data3&sure=--yes-i-really-really-mean-it', 'PUT', 200, '')

From 085f129a6f55a897f9746e6aaa1ef1abef75123a Mon Sep 17 00:00:00 2001
From: Dan Mick
Date: Tue, 23 Jul 2013 22:11:45 -0700
Subject: [PATCH 08/17] ceph.in: remove dead code

Signed-off-by: Dan Mick
---
 src/ceph.in | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/src/ceph.in b/src/ceph.in
index 27d96f46355ec..0d361e1c76ce6 100755
--- a/src/ceph.in
+++ b/src/ceph.in
@@ -378,17 +378,6 @@ def new_style_command(parsed_args, cmdargs, target, sigdict, inbuf, verbose):
     return json_command(cluster_handle, target=target, argdict=valid_dict,
                         inbuf=inbuf)
 
-OSD_TELL_MATCH = { \
-    'sig': ['tell', {'name':'target','type':'CephName'}], \
-    'matchnum': 2, \
-    'return_key': 'target', \
-}
-PGID_MATCH = { \
-    'sig': ['pg', {'name':'pgid','type':'CephPgid'}], \
-    'matchnum': 2, \
-    'return_key': 'pgid', \
-}
-
 def complete(sigdict, args, target):
     """
     Command completion.  Match as much of [args] as possible,

From d7df620b5723d6d7d41338be330ca293f62aa23c Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Wed, 24 Jul 2013 13:56:10 -0700
Subject: [PATCH 09/17] global/signal_handler: poll on the control pipe, too

We also need to poll the control fd/pipe so that we restart the poll
loop when new signal handlers are added.  This was broken by commit
8e4a78f1.

Fixes: #5742
Signed-off-by: Sage Weil
Reviewed-by: Yehuda Sadeh
---
 src/global/signal_handler.cc | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/global/signal_handler.cc b/src/global/signal_handler.cc
index d403637002b15..ce604fe1e5d42 100644
--- a/src/global/signal_handler.cc
+++ b/src/global/signal_handler.cc
@@ -195,6 +195,10 @@ struct SignalHandler : public Thread {
 
       lock.Lock();
       int num_fds = 0;
+      fds[num_fds].fd = pipefd[0];
+      fds[num_fds].events = POLLIN | POLLOUT | POLLERR;
+      fds[num_fds].revents = 0;
+      ++num_fds;
       for (unsigned i=0; i<32; i++) {
 	if (handlers[i]) {
 	  fds[num_fds].fd = handlers[i]->pipefd[0];

From 0b8cad1805bd72c7359b21cfdbc05c2c7b887e44 Mon Sep 17 00:00:00 2001
From: Dan Mick
Date: Wed, 24 Jul 2013 14:32:41 -0700
Subject: [PATCH 10/17] ceph_rest_api.py: allow config section fallback

Try clientname, then 'client', then 'global'

Fixes: #5743
Signed-off-by: Dan Mick
Reviewed-by: Sage Weil
---
 src/pybind/ceph_rest_api.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/src/pybind/ceph_rest_api.py b/src/pybind/ceph_rest_api.py
index fdfe84ee3cb3b..a379e352b1ff2 100755
--- a/src/pybind/ceph_rest_api.py
+++ b/src/pybind/ceph_rest_api.py
@@ -91,10 +91,13 @@ def load(path):
         raise EnvironmentError('No conf file found for "{0}"'.format(clustername))
 
 def get_conf(cfg, clientname, key):
-    try:
-        return cfg.get(clientname, 'restapi_' + key)
-    except ConfigParser.NoOptionError:
-        return None
+    fullkey = 'restapi_' + key
+    for sectionname in clientname, 'client', 'global':
+        try:
+            return cfg.get(sectionname, fullkey)
+        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+            pass
+    return None
 
 # XXX this is done globally, and cluster connection kept open; there
 # are facilities to pass around global info to requests and to

From fd1fd664d6102a2a96b27e8ca9933b54ac626ecb Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Wed, 24 Jul 2013 14:46:24 -0700
Subject: [PATCH 11/17] ceph-disk: use new get_dev_path helper for list

Backport: cuttlefish
Signed-off-by: Sage Weil
Reviewed-by: Dan Mick
Tested-by: Olivier Bonvalet
---
 src/ceph-disk | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/ceph-disk b/src/ceph-disk
index db988b0d5e346..b4a9e68dad756 100755
--- a/src/ceph-disk
+++ b/src/ceph-disk
@@ -1960,7 +1960,7 @@ def main_list(args):
     journal_map = {}
     for base, parts in sorted(partmap.iteritems()):
         for p in parts:
-            dev = '/dev/' + p
+            dev = get_dev_path(p)
             part_uuid = get_partition_uuid(dev)
             if part_uuid:
                 uuid_map[part_uuid] = dev
@@ -1980,11 +1980,11 @@ def main_list(args):
 
     for base, parts in sorted(partmap.iteritems()):
         if parts:
-            print '/dev/%s :' % base
+            print '%s :' % get_dev_path(base)
             for p in sorted(parts):
-                list_dev('/dev/' + p, uuid_map, journal_map)
+                list_dev(get_dev_path(p), uuid_map, journal_map)
         else:
-            list_dev('/dev/' + base, uuid_map, journal_map)
+            list_dev(get_dev_path(base), uuid_map, journal_map)
 
 
 ###########################

From fe2019cc973ce8c9d6dd80c878dcb8e02f4a2695 Mon Sep 17 00:00:00 2001
From: Dan Mick
Date: Wed, 24 Jul 2013 16:08:30 -0700
Subject: [PATCH 12/17] rest/test.py: cope with older requests.py versions

Older requests.py didn't have r.json (or r.json()); avoid by decoding
the json myself

Signed-off-by: Dan Mick
---
 qa/workunits/rest/test.py | 111 +++++++++++++++++++-------------------
 1 file changed, 56 insertions(+), 55 deletions(-)

diff --git a/qa/workunits/rest/test.py b/qa/workunits/rest/test.py
index bcad4b03447a1..60dabab9678fe 100755
--- a/qa/workunits/rest/test.py
+++
b/qa/workunits/rest/test.py @@ -1,8 +1,8 @@ #!/usr/bin/python import exceptions +import json import os -# import nosetests import requests import subprocess import sys @@ -43,9 +43,10 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): if contenttype.startswith('application'): if r_contenttype == 'application/json': - # may raise try: - assert(r.json != None) + # older requests.py doesn't create r.myjson; create it myself + r.myjson = json.loads(r.content) + assert(r.myjson != None) except Exception as e: fail(r, 'Invalid JSON returned: "{0}"'.format(str(e))) @@ -83,7 +84,7 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): assert('client.xx' in r.content) r = expect('auth/list.json', 'GET', 200, 'json') - dictlist = r.json['output']['auth_dump'] + dictlist = r.myjson['output']['auth_dump'] xxdict = [d for d in dictlist if d['entity'] == 'client.xx'][0] assert(xxdict) assert('caps' in xxdict) @@ -97,7 +98,7 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): expect('auth/caps?entity=client.xx&caps=osd&caps=allow rw', 'PUT', 200, 'json', JSONHDR) r = expect('auth/list.json', 'GET', 200, 'json') - dictlist = r.json['output']['auth_dump'] + dictlist = r.myjson['output']['auth_dump'] xxdict = [d for d in dictlist if d['entity'] == 'client.xx'][0] assert(xxdict) assert('caps' in xxdict) @@ -116,7 +117,7 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): expect('auth/del?entity=client.xx', 'PUT', 200, 'json', JSONHDR) r = expect('osd/dump', 'GET', 200, 'json', JSONHDR) - assert('epoch' in r.json['output']) + assert('epoch' in r.myjson['output']) assert('GLOBAL' in expect('df', 'GET', 200, 'plain').content) assert('CATEGORY' in expect('df?detail=detail', 'GET', 200, 'plain').content) @@ -124,12 +125,12 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): assert('CATEGORY' in expect('df?detail', 'GET', 200, 'plain').content) r = expect('df', 'GET', 200, 'json', JSONHDR) - assert('total_used' in r.json['output']['stats']) + assert('total_used' in r.myjson['output']['stats']) r = expect('df', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/stats/stats/total_used') is not None) r = expect('df?detail', 'GET', 200, 'json', JSONHDR) - assert('rd_kb' in r.json['output']['pools'][0]['stats']) + assert('rd_kb' in r.myjson['output']['pools'][0]['stats']) r = expect('df?detail', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/stats/pools/pool/stats/rd_kb') is not None) @@ -149,7 +150,7 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): expect('mds/compat/rm_incompat?feature=4', 'PUT', 200, '') r = expect('mds/compat/show', 'GET', 200, 'json', JSONHDR) - assert('incompat' in r.json['output']) + assert('incompat' in r.myjson['output']) r = expect('mds/compat/show', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/mds_compat/incompat') is not None) @@ -157,8 +158,8 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): expect('mds/deactivate?who=2', 'PUT', 400, '') r = expect('mds/dump.json', 'GET', 200, 'json') - assert('created' in r.json['output']) - current_epoch = r.json['output']['epoch'] + assert('created' in r.myjson['output']) + current_epoch = r.myjson['output']['epoch'] r = expect('mds/dump.xml', 'GET', 200, 'xml') assert(r.tree.find('output/mdsmap/created') is not None) @@ -171,7 +172,7 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): 200, '') 
expect('osd/pool/create?pool=data2&pg_num=10', 'PUT', 200, '') r = expect('osd/dump', 'GET', 200, 'json', JSONHDR) - pools = r.json['output']['pools'] + pools = r.myjson['output']['pools'] poolnum = None for p in pools: if p['pool_name'] == 'data2': @@ -185,10 +186,10 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): '&sure=--yes-i-really-really-mean-it', 'PUT', 200, '') expect('mds/set_max_mds?maxmds=4', 'PUT', 200, '') r = expect('mds/dump.json', 'GET', 200, 'json') - assert(r.json['output']['max_mds'] == 4) + assert(r.myjson['output']['max_mds'] == 4) expect('mds/set_max_mds?maxmds=3', 'PUT', 200, '') r = expect('mds/stat.json', 'GET', 200, 'json') - assert('info' in r.json['output']['mdsmap']) + assert('info' in r.myjson['output']['mdsmap']) r = expect('mds/stat.xml', 'GET', 200, 'xml') assert(r.tree.find('output/mds_stat/mdsmap/info') is not None) @@ -199,17 +200,17 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): r = expect('mon/getmap', 'GET', 200, '') assert(len(r.content) != 0) r = expect('mon_status.json', 'GET', 200, 'json') - assert('name' in r.json['output']) + assert('name' in r.myjson['output']) r = expect('mon_status.xml', 'GET', 200, 'xml') assert(r.tree.find('output/mon_status/name') is not None) bl = '192.168.0.1:0/1000' expect('osd/blacklist?blacklistop=add&addr=' + bl, 'PUT', 200, '') r = expect('osd/blacklist/ls.json', 'GET', 200, 'json') - assert([b for b in r.json['output'] if b['addr'] == bl]) + assert([b for b in r.myjson['output'] if b['addr'] == bl]) expect('osd/blacklist?blacklistop=rm&addr=' + bl, 'PUT', 200, '') r = expect('osd/blacklist/ls.json', 'GET', 200, 'json') - assert([b for b in r.json['output'] if b['addr'] == bl] == []) + assert([b for b in r.myjson['output'] if b['addr'] == bl] == []) expect('osd/crush/tunables?profile=legacy', 'PUT', 200, '') expect('osd/crush/tunables?profile=bobtail', 'PUT', 200, '') @@ -222,73 +223,73 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): expect('osd/down?ids=0', 'PUT', 200, '') r = expect('osd/dump', 'GET', 200, 'json', JSONHDR) - assert(r.json['output']['osds'][0]['osd'] == 0) - assert(r.json['output']['osds'][0]['up'] == 0) + assert(r.myjson['output']['osds'][0]['osd'] == 0) + assert(r.myjson['output']['osds'][0]['up'] == 0) expect('osd/unset?key=noup', 'PUT', 200, '') for i in range(0,100): r = expect('osd/dump', 'GET', 200, 'json', JSONHDR) - assert(r.json['output']['osds'][0]['osd'] == 0) - if r.json['output']['osds'][0]['up'] == 1: + assert(r.myjson['output']['osds'][0]['osd'] == 0) + if r.myjson['output']['osds'][0]['up'] == 1: break else: print >> sys.stderr, "waiting for osd.0 to come back up" time.sleep(10) r = expect('osd/dump', 'GET', 200, 'json', JSONHDR) - assert(r.json['output']['osds'][0]['osd'] == 0) - assert(r.json['output']['osds'][0]['up'] == 1) + assert(r.myjson['output']['osds'][0]['osd'] == 0) + assert(r.myjson['output']['osds'][0]['up'] == 1) r = expect('osd/find?id=1', 'GET', 200, 'json', JSONHDR) - assert(r.json['output']['osd'] == 1) + assert(r.myjson['output']['osd'] == 1) expect('osd/out?ids=1', 'PUT', 200, '') r = expect('osd/dump', 'GET', 200, 'json', JSONHDR) - assert(r.json['output']['osds'][1]['osd'] == 1) - assert(r.json['output']['osds'][1]['in'] == 0) + assert(r.myjson['output']['osds'][1]['osd'] == 1) + assert(r.myjson['output']['osds'][1]['in'] == 0) expect('osd/in?ids=1', 'PUT', 200, '') r = expect('osd/dump', 'GET', 200, 'json', JSONHDR) - assert(r.json['output']['osds'][1]['osd'] == 1) 
- assert(r.json['output']['osds'][1]['in'] == 1) + assert(r.myjson['output']['osds'][1]['osd'] == 1) + assert(r.myjson['output']['osds'][1]['in'] == 1) r = expect('osd/find?id=0', 'GET', 200, 'json', JSONHDR) - assert(r.json['output']['osd'] == 0) + assert(r.myjson['output']['osd'] == 0) r = expect('osd/getmaxosd', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/getmaxosd/max_osd') is not None) r = expect('osd/getmaxosd', 'GET', 200, 'json', JSONHDR) - saved_maxosd = r.json['output']['max_osd'] + saved_maxosd = r.myjson['output']['max_osd'] expect('osd/setmaxosd?newmax=10', 'PUT', 200, '') r = expect('osd/getmaxosd', 'GET', 200, 'json', JSONHDR) - assert(r.json['output']['max_osd'] == 10) + assert(r.myjson['output']['max_osd'] == 10) expect('osd/setmaxosd?newmax={0}'.format(saved_maxosd), 'PUT', 200, '') r = expect('osd/getmaxosd', 'GET', 200, 'json', JSONHDR) - assert(r.json['output']['max_osd'] == saved_maxosd) + assert(r.myjson['output']['max_osd'] == saved_maxosd) r = expect('osd/create', 'PUT', 200, 'json', JSONHDR) - assert('osdid' in r.json['output']) - osdid = r.json['output']['osdid'] + assert('osdid' in r.myjson['output']) + osdid = r.myjson['output']['osdid'] expect('osd/lost?id={0}'.format(osdid), 'PUT', 400, '') expect('osd/lost?id={0}&sure=--yes-i-really-mean-it'.format(osdid), 'PUT', 200, 'json', JSONHDR) expect('osd/rm?ids={0}'.format(osdid), 'PUT', 200, '') r = expect('osd/ls', 'GET', 200, 'json', JSONHDR) - assert(isinstance(r.json['output'], list)) + assert(isinstance(r.myjson['output'], list)) r = expect('osd/ls', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/osds/osd') is not None) expect('osd/pause', 'PUT', 200, '') r = expect('osd/dump', 'GET', 200, 'json', JSONHDR) - assert('pauserd,pausewr' in r.json['output']['flags']) + assert('pauserd,pausewr' in r.myjson['output']['flags']) expect('osd/unpause', 'PUT', 200, '') r = expect('osd/dump', 'GET', 200, 'json', JSONHDR) - assert('pauserd,pausewr' not in r.json['output']['flags']) + assert('pauserd,pausewr' not in r.myjson['output']['flags']) r = expect('osd/tree', 'GET', 200, 'json', JSONHDR) - assert('nodes' in r.json['output']) + assert('nodes' in r.myjson['output']) r = expect('osd/tree', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/tree/nodes') is not None) expect('osd/pool/mksnap?pool=data&snap=datasnap', 'PUT', 200, '') @@ -298,20 +299,20 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): expect('osd/pool/create?pool=data2&pg_num=10', 'PUT', 200, '') r = expect('osd/lspools', 'GET', 200, 'json', JSONHDR) - assert([p for p in r.json['output'] if p['poolname'] == 'data2']) + assert([p for p in r.myjson['output'] if p['poolname'] == 'data2']) expect('osd/pool/rename?srcpool=data2&destpool=data3', 'PUT', 200, '') r = expect('osd/lspools', 'GET', 200, 'json', JSONHDR) - assert([p for p in r.json['output'] if p['poolname'] == 'data3']) + assert([p for p in r.myjson['output'] if p['poolname'] == 'data3']) expect('osd/pool/delete?pool=data3', 'PUT', 400, '') expect('osd/pool/delete?pool=data3&pool2=data3&sure=--yes-i-really-really-mean-it', 'PUT', 200, '') r = expect('osd/stat', 'GET', 200, 'json', JSONHDR) - assert('num_up_osds' in r.json['output']) + assert('num_up_osds' in r.myjson['output']) r = expect('osd/stat', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/osdmap/num_up_osds') is not None) r = expect('osd/ls', 'GET', 200, 'json', JSONHDR) - for osdid in r.json['output']: + for osdid in r.myjson['output']: # XXX no tell yet # 
expect('tell?target=osd.{0}&args=version'.format(osdid), 'PUT', # 200, '') @@ -321,7 +322,7 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): expect('pg/debug?debugop=degraded_pgs_exist', 'GET', 200, '') expect('pg/deep-scrub?pgid=0.0', 'PUT', 200, '') r = expect('pg/dump', 'GET', 200, 'json', JSONHDR) - assert('pg_stats_sum' in r.json['output']) + assert('pg_stats_sum' in r.myjson['output']) r = expect('pg/dump', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/pg_map/pg_stats_sum') is not None) @@ -335,8 +336,8 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): assert(len(r.content) != 0) r = expect('pg/map?pgid=0.0', 'GET', 200, 'json', JSONHDR) - assert('acting' in r.json['output']) - assert(r.json['output']['pgid'] == '0.0') + assert('acting' in r.myjson['output']) + assert(r.myjson['output']['pgid'] == '0.0') r = expect('pg/map?pgid=0.0', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/pg_map/acting') is not None) assert(r.tree.find('output/pg_map/pgid').text == '0.0') @@ -348,15 +349,15 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): expect('pg/set_full_ratio?ratio=0.90', 'PUT', 200, '') r = expect('pg/dump', 'GET', 200, 'json', JSONHDR) - assert(float(r.json['output']['full_ratio']) == 0.90) + assert(float(r.myjson['output']['full_ratio']) == 0.90) expect('pg/set_full_ratio?ratio=0.95', 'PUT', 200, '') expect('pg/set_nearfull_ratio?ratio=0.90', 'PUT', 200, '') r = expect('pg/dump', 'GET', 200, 'json', JSONHDR) - assert(float(r.json['output']['near_full_ratio']) == 0.90) + assert(float(r.myjson['output']['near_full_ratio']) == 0.90) expect('pg/set_full_ratio?ratio=0.85', 'PUT', 200, '') r = expect('pg/stat', 'GET', 200, 'json', JSONHDR) - assert('pg_stats_sum' in r.json['output']) + assert('pg_stats_sum' in r.myjson['output']) r = expect('pg/stat', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/pg_map/pg_stats_sum') is not None) @@ -367,12 +368,12 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): # report's CRC needs to be handled # r = expect('report', 'GET', 200, 'json', JSONHDR) - # assert('osd_stats' in r.json['output']) + # assert('osd_stats' in r.myjson['output']) # r = expect('report', 'GET', 200, 'xml', XMLHDR) # assert(r.tree.find('output/report/osdmap') is not None) r = expect('status', 'GET', 200, 'json', JSONHDR) - assert('osdmap' in r.json['output']) + assert('osdmap' in r.myjson['output']) r = expect('status', 'GET', 200, 'xml', XMLHDR) assert(r.tree.find('output/status/osdmap') is not None) @@ -393,21 +394,21 @@ def expect(url, method, respcode, contenttype, extra_hdrs=None, data=None): for v in ['pg_num', 'pgp_num', 'size', 'min_size', 'crash_replay_interval', 'crush_ruleset']: r = expect('osd/pool/get.json?pool=data&var=' + v, 'GET', 200, 'json') - assert(v in r.json['output']) + assert(v in r.myjson['output']) r = expect('osd/pool/get.json?pool=data&var=size', 'GET', 200, 'json') - assert(r.json['output']['size'] == 2) + assert(r.myjson['output']['size'] == 2) expect('osd/pool/set?pool=data&var=size&val=3', 'PUT', 200, 'plain') r = expect('osd/pool/get.json?pool=data&var=size', 'GET', 200, 'json') - assert(r.json['output']['size'] == 3) + assert(r.myjson['output']['size'] == 3) expect('osd/pool/set?pool=data&var=size&val=2', 'PUT', 200, 'plain') r = expect('osd/pool/get.json?pool=data&var=size', 'GET', 200, 'json') - assert(r.json['output']['size'] == 2) + assert(r.myjson['output']['size'] == 2) r = 
expect('osd/pool/get.json?pool=rbd&var=crush_ruleset', 'GET', 200, 'json')
-    assert(r.json['output']['crush_ruleset'] == 2)
+    assert(r.myjson['output']['crush_ruleset'] == 2)
 
     expect('osd/thrash?num_epochs=10', 'PUT', 200, '')
 
     print 'OK'

From 0018b45f3c57b00709b1cf7889089a21c047e993 Mon Sep 17 00:00:00 2001
From: Gary Lowell
Date: Wed, 24 Jul 2013 16:18:56 -0700
Subject: [PATCH 13/17] v0.67-rc2

---
 debian/changelog | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/debian/changelog b/debian/changelog
index cfcf0491dd8d6..7b814f0da90ec 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+ceph (0.67-rc2-1) precise; urgency=low
+
+  * New upstream release
+
+ -- Gary Lowell  Wed, 24 Jul 2013 16:18:33 -0700
+
 ceph (0.67-rc1-1) precise; urgency=low
 
   * New upstream release

From 41930b5e8f0bfcc695c8548108b692fd43ca0195 Mon Sep 17 00:00:00 2001
From: Dan Mick
Date: Tue, 23 Jul 2013 18:29:59 -0700
Subject: [PATCH 14/17] ceph.spec.in, debian/control: python-ceph depends on
 python-flask

Signed-off-by: Dan Mick
---
 ceph.spec.in   | 1 +
 debian/control | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/ceph.spec.in b/ceph.spec.in
index 0a4db619023d4..fb8bcca4b7140 100644
--- a/ceph.spec.in
+++ b/ceph.spec.in
@@ -188,6 +188,7 @@ License: LGPL-2.0
 Requires: librados2 = %{version}-%{release}
 Requires: librbd1 = %{version}-%{release}
 Requires: libcephfs1 = %{version}-%{release}
+Requires: python-flask
 %if 0%{defined suse_version}
 %py_requires
 %endif
diff --git a/debian/control b/debian/control
index 241fabb5e4f30..4a12374bc61dd 100644
--- a/debian/control
+++ b/debian/control
@@ -389,7 +389,7 @@ Description: Ceph test and benchmarking tools.
 Package: python-ceph
 Architecture: linux-any
 Section: python
-Depends: librados2, librbd1, ${misc:Depends}, ${python:Depends}
+Depends: librados2, librbd1, python-flask, ${misc:Depends}, ${python:Depends}
 X-Python-Version: >= 2.6
 Description: Python libraries for the Ceph distributed filesystem
 Ceph is a distributed storage and network file system designed to provide

From 37a4c4af54879512429bb114285bcb4c7c3488d5 Mon Sep 17 00:00:00 2001
From: Samuel Just
Date: Tue, 23 Jul 2013 17:34:25 -0700
Subject: [PATCH 15/17] test/filestore/store_test: add test for 5723

Signed-off-by: Samuel Just
Reviewed-by: Sage Weil
---
 src/os/LFNIndex.cc               | 11 +++--
 src/test/filestore/store_test.cc | 71 +++++++++++++++++++++++++++++++-
 2 files changed, 77 insertions(+), 5 deletions(-)

diff --git a/src/os/LFNIndex.cc b/src/os/LFNIndex.cc
index edf361a44f0fa..09d0f02267f8c 100644
--- a/src/os/LFNIndex.cc
+++ b/src/os/LFNIndex.cc
@@ -75,16 +75,19 @@ int LFNIndex::init()
 
 int LFNIndex::created(const hobject_t &hoid, const char *path)
 {
+  WRAP_RETRY(
   vector<string> path_comp;
   string short_name;
-  int r;
   r = decompose_full_path(path, &path_comp, 0, &short_name);
   if (r < 0)
-    return r;
+    goto out;
   r = lfn_created(path_comp, hoid, short_name);
   if (r < 0)
-    return r;
-  return _created(path_comp, hoid, short_name);
+    goto out;
+  r = _created(path_comp, hoid, short_name);
+  if (r < 0)
+    goto out;
+  );
 }
 
 int LFNIndex::unlink(const hobject_t &hoid)
diff --git a/src/test/filestore/store_test.cc b/src/test/filestore/store_test.cc
index 87482ef702d8c..80c775052eca3 100644
--- a/src/test/filestore/store_test.cc
+++ b/src/test/filestore/store_test.cc
@@ -829,6 +829,75 @@ TEST_F(StoreTest, ColSplitTest3) {
 }
 #endif
 
+/**
+ * This test tests adding two different groups
+ * of objects, each with 1 common prefix and 1
+ * different prefix.  We then remove half
+ * in order to verify that the merging correctly
+ * stops at the common prefix subdir.  See bug
+ * #5723 */
+TEST_F(StoreTest, TwoHash) {
+  coll_t cid("asdf");
+  int r;
+  {
+    ObjectStore::Transaction t;
+    t.create_collection(cid);
+    r = store->apply_transaction(t);
+    ASSERT_EQ(r, 0);
+  }
+  std::cout << "Making objects" << std::endl;
+  for (int i = 0; i < 360; ++i) {
+    ObjectStore::Transaction t;
+    hobject_t o;
+    if (i < 8) {
+      o.hash = (i << 16) | 0xA1;
+      t.touch(cid, o);
+    }
+    o.hash = (i << 16) | 0xB1;
+    t.touch(cid, o);
+    r = store->apply_transaction(t);
+    ASSERT_EQ(r, 0);
+  }
+  std::cout << "Removing half" << std::endl;
+  for (int i = 1; i < 8; ++i) {
+    ObjectStore::Transaction t;
+    hobject_t o;
+    o.hash = (i << 16) | 0xA1;
+    t.remove(cid, o);
+    r = store->apply_transaction(t);
+    ASSERT_EQ(r, 0);
+  }
+  std::cout << "Checking" << std::endl;
+  for (int i = 1; i < 8; ++i) {
+    ObjectStore::Transaction t;
+    hobject_t o;
+    o.hash = (i << 16) | 0xA1;
+    bool exists = store->exists(cid, o);
+    ASSERT_EQ(exists, false);
+  }
+  {
+    hobject_t o;
+    o.hash = 0xA1;
+    bool exists = store->exists(cid, o);
+    ASSERT_EQ(exists, true);
+  }
+  std::cout << "Cleanup" << std::endl;
+  for (int i = 0; i < 360; ++i) {
+    ObjectStore::Transaction t;
+    hobject_t o;
+    o.hash = (i << 16) | 0xA1;
+    t.remove(cid, o);
+    o.hash = (i << 16) | 0xB1;
+    t.remove(cid, o);
+    r = store->apply_transaction(t);
+    ASSERT_EQ(r, 0);
+  }
+  ObjectStore::Transaction t;
+  t.remove_collection(cid);
+  r = store->apply_transaction(t);
+  ASSERT_EQ(r, 0);
+}
+
 //
 // support tests for qa/workunits/filestore/filestore.sh
 //
@@ -892,7 +961,7 @@ int main(int argc, char **argv) {
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
   g_ceph_context->_conf->set_val("osd_journal_size", "400");
-  g_ceph_context->_conf->set_val("filestore_index_retry_probability", "1");
+  g_ceph_context->_conf->set_val("filestore_index_retry_probability", "0.5");
   g_ceph_context->_conf->set_val("filestore_op_thread_timeout", "1000");
   g_ceph_context->_conf->set_val("filestore_op_thread_suicide_timeout", "10000");
   g_ceph_context->_conf->apply_changes(NULL);

From 0dc3efdd885377a07987d868af5bb7a38245c90b Mon Sep 17 00:00:00 2001
From: Samuel Just
Date: Tue, 23 Jul 2013 18:04:40 -0700
Subject: [PATCH 16/17] HashIndex: reset attr upon split or merge completion

A replay of an in progress merge or split might make our counts
unreliable.

Fixes: #5723
Signed-off-by: Samuel Just
Reviewed-by: Sage Weil
---
 src/os/HashIndex.cc | 15 ++-------------
 1 file changed, 2 insertions(+), 13 deletions(-)

diff --git a/src/os/HashIndex.cc b/src/os/HashIndex.cc
index 86a912bbef2b0..c279bab3a601a 100644
--- a/src/os/HashIndex.cc
+++ b/src/os/HashIndex.cc
@@ -447,18 +447,7 @@ int HashIndex::complete_merge(const vector<string> &path, subdir_info_s info) {
   r = move_objects(path, dst);
   if (r < 0)
     return r;
-
-  map<string, hobject_t> objects_dst;
-  r = list_objects(dst, 0, 0, &objects_dst);
-  if (r < 0)
-    return r;
-  set<string> subdirs;
-  r = list_subdirs(dst, &subdirs);
-  if (r < 0)
-    return r;
-  dstinfo.objs = objects_dst.size();
-  dstinfo.subdirs = subdirs.size() - 1;
-  r = set_info(dst, dstinfo);
+  r = reset_attr(dst);
   if (r < 0)
     return r;
   r = remove_path(path);
@@ -576,7 +565,7 @@ int HashIndex::complete_split(const vector<string> &path, subdir_info_s info) {
   if (r < 0)
     return r;
   info.objs = objects.size();
-  r = set_info(path, info);
+  r = reset_attr(path);
   if (r < 0)
     return r;
   r = fsync_dir(path);

From 870c474c5348831fcb13797d164f49682918fb30 Mon Sep 17 00:00:00 2001
From: Samuel Just
Date: Tue, 23 Jul 2013 13:51:26 -0700
Subject: [PATCH 17/17] FileStore::_collection_rename: fix global replay guard

If the rename is being replayed, we might have already performed the
rename; skip it.

Also, we must set the collection replay guard only after we have done
the rename.

Signed-off-by: Samuel Just
Reviewed-by: Sage Weil
---
 src/os/FileStore.cc | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/os/FileStore.cc b/src/os/FileStore.cc
index 17105c11d69b0..4e4847e69170e 100644
--- a/src/os/FileStore.cc
+++ b/src/os/FileStore.cc
@@ -4304,9 +4304,6 @@ int FileStore::_collection_rename(const coll_t &cid, const coll_t &ncid,
   get_cdir(cid, old_coll, sizeof(old_coll));
   get_cdir(ncid, new_coll, sizeof(new_coll));
 
-  _set_global_replay_guard(cid, spos);
-  _set_replay_guard(cid, spos);
-
   if (_check_replay_guard(cid, spos) < 0) {
     return 0;
   }
@@ -4315,6 +4312,16 @@ int FileStore::_collection_rename(const coll_t &cid, const coll_t &ncid,
     return _collection_remove_recursive(cid, spos);
   }
 
+  if (!collection_exists(cid)) {
+    if (replaying) {
+      // already happened
+      return 0;
+    } else {
+      return -ENOENT;
+    }
+  }
+  _set_global_replay_guard(cid, spos);
+
   int ret = 0;
   if (::rename(old_coll, new_coll)) {
     if (replaying && !btrfs_stable_commits &&