Skip to content

Commit 275ce89

Browse files
committed
collect cbbackupmgr logs after runs finish
Change-Id: I32a85c46c512df5b80f0687c6f374cc790b873de Reviewed-on: http://review.couchbase.org/c/perfrunner/+/138261 Tested-by: Build Bot <build@couchbase.com> Reviewed-by: <sharujayaram@gmail.com>
1 parent c4df241 commit 275ce89

2 files changed

Lines changed: 94 additions & 53 deletions

File tree

perfrunner/helpers/local.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -134,6 +134,16 @@ def cbbackupmgr_backup(master_node: str, cluster_spec: ClusterSpec,
134134
local(cmd)
135135

136136

137+
def cbbackupmgr_collectinfo(cluster_spec: ClusterSpec, archive: str = ''):
    """Collect cbbackupmgr diagnostic logs into the current directory.

    Runs ``cbbackupmgr collect-logs`` against the backup archive — the
    explicit *archive* argument when given, otherwise the cluster spec's
    default backup location — and writes the log bundle to the working
    directory.

    Args:
        cluster_spec: Cluster specification providing the default backup
            archive path (``cluster_spec.backup``).
        archive: Optional explicit archive path; falls back to the
            cluster spec's backup location when empty.
    """
    # Fixed log message: previous text had a typo ("cbbackumgr") and named
    # the wrong tool ("cbcollect_info"); the command run is collect-logs.
    logger.info('Running cbbackupmgr collect-logs on local/host')

    # Empty-string archive is falsy, so `archive or cluster_spec.backup`
    # selects the default archive when no override is supplied.
    cmd = ('./opt/couchbase/bin/cbbackupmgr collect-logs --archive {} '
           '--output-dir {}'.format(archive or cluster_spec.backup, '.'))

    logger.info('Running: {}'.format(cmd))
    local(cmd)
137147
def get_backup_snapshots(cluster_spec: ClusterSpec) -> List[str]:
138148

139149
logger.info('running cbbackupmgr list/info command ')

perfrunner/tests/tools.py

Lines changed: 84 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,9 @@ def backup_list(self):
6161
local.cbbackupmgr_list(cluster_spec=self.cluster_spec,
6262
snapshots=snapshots)
6363

64+
def collectinfo(self):
    """Gather cbbackupmgr logs for the archive used by this test run."""
    local.cbbackupmgr_collectinfo(self.cluster_spec)
66+
6467
def run(self):
6568
self.extract_tools()
6669

@@ -108,7 +111,10 @@ def _report_kpi(self, time_elapsed):
108111
def run(self):
109112
super().run()
110113

111-
time_elapsed = self.backup()
114+
try:
115+
time_elapsed = self.backup()
116+
finally:
117+
self.collectinfo()
112118

113119
self.report_kpi(time_elapsed)
114120

@@ -179,16 +185,19 @@ def _report_kpi(self, time_elapsed: float, backup_size_difference: float):
179185
def run(self):
180186
super().run()
181187

182-
self.backup()
183-
self.wait_for_persistence()
188+
try:
189+
self.backup()
190+
self.wait_for_persistence()
184191

185-
initial_size = local.calc_backup_size(self.cluster_spec,
186-
rounded=False)
187-
compact_time = self.compact()
188-
compacted_size = local.calc_backup_size(self.cluster_spec,
189-
rounded=False)
190-
# Size differences can be a little small, so go for more precision here
191-
size_diff = round(initial_size - compacted_size, 2)
192+
initial_size = local.calc_backup_size(self.cluster_spec,
193+
rounded=False)
194+
compact_time = self.compact()
195+
compacted_size = local.calc_backup_size(self.cluster_spec,
196+
rounded=False)
197+
# Size differences can be a little small, so go for more precision here
198+
size_diff = round(initial_size - compacted_size, 2)
199+
finally:
200+
self.collectinfo()
192201

193202
self.report_kpi(compact_time, size_diff)
194203

@@ -243,38 +252,41 @@ def _report_kpi(self, time_elapsed: int, backup_size: float):
243252
)
244253

245254
def run(self):
246-
self.extract_tools()
247-
248-
self.load()
249-
self.wait_for_persistence()
250-
self.compact_bucket(wait=True)
251-
self.backup()
252-
253-
initial_backup_size = local.calc_backup_size(self.cluster_spec,
254-
rounded=False)
255+
try:
256+
self.extract_tools()
255257

256-
self.access()
257-
self.wait_for_persistence()
258-
259-
# Define a secondary load. For this we borrow the 'creates' field,
260-
# since load doesn't normally use this anyway.
261-
inc_load = self.test_config.load_settings.creates
262-
workers = self.test_config.load_settings.workers
263-
size = self.test_config.load_settings.size
264-
265-
# New key prefix needed to create incremental dataset.
266-
self.load(settings=LoadSettings({"items": inc_load,
267-
"workers": workers,
268-
"size": size}),
269-
target_iterator=TargetIterator(self.cluster_spec,
270-
self.test_config,
271-
prefix='inc-'))
272-
self.wait_for_persistence()
273-
274-
inc_backup_time = self.backup_with_stats(mode=True)
275-
total_backup_size = local.calc_backup_size(self.cluster_spec,
276-
rounded=False)
277-
inc_backup_size = round(total_backup_size - initial_backup_size, 2)
258+
self.load()
259+
self.wait_for_persistence()
260+
self.compact_bucket(wait=True)
261+
self.backup()
262+
263+
initial_backup_size = local.calc_backup_size(self.cluster_spec,
264+
rounded=False)
265+
266+
self.access()
267+
self.wait_for_persistence()
268+
269+
# Define a secondary load. For this we borrow the 'creates' field,
270+
# since load doesn't normally use this anyway.
271+
inc_load = self.test_config.load_settings.creates
272+
workers = self.test_config.load_settings.workers
273+
size = self.test_config.load_settings.size
274+
275+
# New key prefix needed to create incremental dataset.
276+
self.load(settings=LoadSettings({"items": inc_load,
277+
"workers": workers,
278+
"size": size}),
279+
target_iterator=TargetIterator(self.cluster_spec,
280+
self.test_config,
281+
prefix='inc-'))
282+
self.wait_for_persistence()
283+
284+
inc_backup_time = self.backup_with_stats(mode=True)
285+
total_backup_size = local.calc_backup_size(self.cluster_spec,
286+
rounded=False)
287+
inc_backup_size = round(total_backup_size - initial_backup_size, 2)
288+
finally:
289+
self.collectinfo()
278290

279291
self._report_kpi(inc_backup_time, inc_backup_size)
280292

@@ -320,13 +332,16 @@ def run(self):
320332

321333
self.load()
322334
self.wait_for_persistence()
323-
self.backup() # 1st snapshot
335+
try:
336+
self.backup() # 1st snapshot
324337

325-
self.load()
326-
self.wait_for_persistence()
327-
self.backup(mode=True) # 2nd snapshot
338+
self.load()
339+
self.wait_for_persistence()
340+
self.backup(mode=True) # 2nd snapshot
328341

329-
time_elapsed = self.merge()
342+
time_elapsed = self.merge()
343+
finally:
344+
self.collectinfo()
330345

331346
self.report_kpi(time_elapsed)
332347

@@ -361,7 +376,10 @@ def run(self):
361376

362377
self.flush_buckets()
363378

364-
time_elapsed = self.restore()
379+
try:
380+
time_elapsed = self.restore()
381+
finally:
382+
self.collectinfo()
365383

366384
self.report_kpi(time_elapsed)
367385

@@ -403,9 +421,13 @@ def backup_list(self):
403421
def run(self):
404422
super().run()
405423

406-
self.backup()
407-
local.drop_caches()
408-
list_time = self.backup_list()
424+
try:
425+
self.backup()
426+
local.drop_caches()
427+
list_time = self.backup_list()
428+
finally:
429+
self.collectinfo()
430+
409431
self.report_kpi(list_time)
410432

411433

@@ -469,7 +491,10 @@ def export(self):
469491
def run(self):
470492
super().run()
471493

472-
time_elapsed = self.export()
494+
try:
495+
time_elapsed = self.export()
496+
finally:
497+
self.collectinfo()
473498

474499
self.report_kpi(time_elapsed)
475500

@@ -488,7 +513,10 @@ def run(self):
488513

489514
self.flush_buckets()
490515

491-
time_elapsed = self.import_data()
516+
try:
517+
time_elapsed = self.import_data()
518+
finally:
519+
self.collectinfo()
492520

493521
self.report_kpi(time_elapsed)
494522

@@ -503,7 +531,10 @@ def _report_kpi(self, time_elapsed: float):
503531
def run(self):
504532
self.extract_tools()
505533

506-
time_elapsed = self.import_data()
534+
try:
535+
time_elapsed = self.import_data()
536+
finally:
537+
self.collectinfo()
507538

508539
self.report_kpi(time_elapsed)
509540

0 commit comments

Comments
 (0)