# load_data_from_db.py
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import json
import logging
import math
import multiprocessing
import os
import platform
import pprint
import re
import time as ttime
import warnings
from distutils.version import LooseVersion
import dask.array as da
import h5py
import numpy as np
import pandas as pd
from event_model import Filler
try:
import databroker
except ImportError:
pass
import pyxrf
from ..core.utils import convert_time_to_nexus_string
from .catalog_management import catalog_info, get_catalog
from .scan_metadata import ScanMetadataXRF
pyxrf_version = pyxrf.__version__
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore")
sep_v = os.sep
try:
logger.info(f"Opening catalog: {catalog_info.name!r}")
if not catalog_info.name:
# Attempt to find the configuration file first
config_path = "/etc/pyxrf/pyxrf.json"
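        # A minimal example of the expected configuration file (illustrative;
        # the only key read here is "beamline_name"):
        #     {"beamline_name": "SRX"}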
if os.path.isfile(config_path):
try:
with open(config_path, "r") as beamline_pyxrf:
beamline_config_pyxrf = json.load(beamline_pyxrf)
catalog_info.set_name(beamline_config_pyxrf["beamline_name"])
except Exception as ex:
raise IOError(f"Error while opening configuration file {config_path!r}") from ex
else:
# Otherwise try to identify the beamline using host name
hostname = platform.node()
catalog_names = {
"xf03id": "HXN",
"xf05id": "SRX",
"xf08bm": "TES",
"xf04bm": "XFM",
}
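            # Illustrative: a host with a name starting with "xf05id" (e.g. a
            # hypothetical "xf05id-ws1") is mapped to the "SRX" catalog below.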
for k, v in catalog_names.items():
if hostname.startswith(k):
catalog_info.set_name(v)
if not catalog_info.name:
raise Exception("Beamline is not identified")
if catalog_info.name.upper() == "HXN":
from pyxrf.db_config.hxn_db_config import db
elif catalog_info.name.upper() == "SRX":
_failed = False
try:
db = get_catalog("srx")
except Exception as ex:
logger.error("Failed to load Tiled catalog: %s", str(ex))
_failed = True
if _failed:
logger.info("Attempting to open databroker ...")
from pyxrf.db_config.srx_db_config import db
elif catalog_info.name.upper() == "XFM":
from pyxrf.db_config.xfm_db_config import db
elif catalog_info.name.upper() == "TES":
_failed = False
try:
db = get_catalog("tes")
except Exception as ex:
logger.error("Failed to load Tiled catalog: %s", str(ex))
_failed = True
if _failed:
logger.info("Attempting to open databroker ...")
from pyxrf.db_config.tes_db_config import db
else:
db = None
db_analysis = None
print(f"Beamline Database is not used in pyxrf: unknown catalog {catalog_info.name!r}")
except Exception as ex:
db = None
print(f"Beamline Database is not used in pyxrf: {ex}")


def flip_data(input_data, subscan_dims=None):
    """
    Flip a 2D or 3D array along the second index of its shape: every other
    row is reversed, which converts a snake (serpentine) scan to a raster.

    .. warning:: This function mutates the input array.

    Parameters
    ----------
    input_data : 2D or 3D array
        Data to flip.
    subscan_dims : iterable of (nx, ny) or None
        Dimensions of the subscans. If provided, rows are flipped within
        each subscan separately.

    Returns
    -------
    flipped data
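
    Examples
    --------
    A minimal illustration (hypothetical values): rows with odd indices are
    reversed, which converts a snake scan into a regular raster.

    >>> a = np.arange(6).reshape(3, 2)
    >>> flip_data(a)
    array([[0, 1],
           [3, 2],
           [4, 5]])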
"""
new_data = np.asarray(input_data)
data_shape = input_data.shape
if len(data_shape) == 2:
if subscan_dims is None:
new_data[1::2, :] = new_data[1::2, ::-1]
else:
i = 0
for nx, ny in subscan_dims:
start = i + 1
end = i + ny
new_data[start:end:2, :] = new_data[start:end:2, ::-1]
i += ny
if len(data_shape) == 3:
if subscan_dims is None:
new_data[1::2, :, :] = new_data[1::2, ::-1, :]
else:
i = 0
for nx, ny in subscan_dims:
start = i + 1
end = i + ny
new_data[start:end:2, :, :] = new_data[start:end:2, ::-1, :]
i += ny
return new_data


def fetch_run_info(run_id_uid, catalog_name=None):
    """
    Fetch key data from the start document of the selected run.

    Parameters
    ----------
    run_id_uid : int or str
        Run ID (positive or negative int) or UID (str, full or short) of the run.
    catalog_name : str or None
        Name of the catalog (e.g. ``"srx"``). The function attempts to determine
        the catalog name automatically if the parameter is not specified or ``None``.

    Returns
    -------
    run_id, run_uid : tuple(int, str)
        Run ID (always a positive int) and Run UID (str, always the full UID).

    Raises
    ------
    RuntimeError
        Failed to fetch the run from Databroker.
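
    Examples
    --------
    Illustrative only (the Run ID and catalog name are hypothetical):

    >>> run_id, run_uid = fetch_run_info(-1, catalog_name="srx")  # doctest: +SKIP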
"""
try:
if catalog_name:
catalog = get_catalog(catalog_name)
else:
catalog = db
        hdr = catalog[run_id_uid]
run_id = hdr.start["scan_id"]
run_uid = hdr.start["uid"]
except Exception:
if isinstance(run_id_uid, int):
msg = f"ID {run_id_uid}"
else:
msg = f"UID '{run_id_uid}'"
raise RuntimeError(f"Failed to find run with {msg}.")
return run_id, run_uid


def fetch_data_from_db(
run_id_uid,
fpath=None,
create_each_det=False,
fname_add_version=False,
completed_scans_only=False,
successful_scans_only=False,
file_overwrite_existing=False,
output_to_file=False,
save_scaler=True,
num_end_lines_excluded=None,
skip_scan_types=None,
catalog_name=None,
):
"""
Read data from databroker.
This is the place where new beamlines can be easily added
to pyxrf GUI.
Save the data from databroker to hdf file if needed.
.. note:: Requires the databroker package from NSLS2
Parameters
----------
runid : int
id number for given run
fpath: str, optional
path to save hdf file
create_each_det: bool, optional
Do not create data for each detector is data size is too large,
if set as false. This will slow down the speed of creating hdf file
with large data size. srx beamline only.
fname_add_version : bool
True: if file already exists, then file version is added to the file name
so that it becomes unique in the current directory. The version is
added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.
False: then conversion fails.
completed_scans_only : bool
True: process only completed scans (for which ``stop`` document exists in
the database). Failed scan for which ``stop`` document exists are considered
completed even if not the whole image was scanned. If incomplete scan is
encountered, an exception is thrown.
False: the feature is disabled, incomplete scan will be processed.
file_overwrite_existing : bool, keyword parameter
This option should be used if the existing file should be deleted and replaced
with the new file with the same name. This option should be used with caution,
since the existing file may contain processed data, which will be permanently deleted.
True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,
then new versions of the existing file will always be created.
False: do not overwrite existing files. If the file already exists, then the exception
will be raised (loading the single scan) or the scan will be skipped (loading the range
of scans).
output_to_file : bool, optional
save data to hdf5 file if True
save_scaler : bool, optional
choose to save scaler data or not for srx beamline, test purpose only.
num_end_lines_excluded : int, optional
remove the last few bad lines
skip_scan_types: list(str) or None
list of plan type names to ignore, e.g. ['FlyPlan1D']. (Supported only at HXN.)
catalog_name: str or None
Returns
-------
dict of data in 2D format matching x,y scanning positions
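
    Examples
    --------
    Illustrative only (the Run ID and file name are hypothetical):

    >>> data = fetch_data_from_db(
    ...     1000, fpath="scan2D_1000.h5", output_to_file=True
    ... )  # doctest: +SKIP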
"""
if catalog_name:
catalog = get_catalog(catalog_name)
else:
catalog = db
hdr = catalog[run_id_uid]
beamline_id = hdr.start["beamline_id"]
print("Loading data from database.")
if beamline_id == "HXN":
data = map_data2D_hxn(
run_id_uid,
fpath,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
successful_scans_only=successful_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=output_to_file,
skip_scan_types=skip_scan_types,
)
elif beamline_id == "xf05id" or beamline_id == "SRX":
data = map_data2D_srx(
run_id_uid,
fpath,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
successful_scans_only=successful_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=output_to_file,
save_scaler=save_scaler,
num_end_lines_excluded=num_end_lines_excluded,
catalog=catalog,
)
elif beamline_id == "XFM":
data = map_data2D_xfm(
run_id_uid,
fpath,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
successful_scans_only=successful_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=output_to_file,
)
elif beamline_id == "TES":
data = map_data2D_tes(
run_id_uid,
fpath,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
successful_scans_only=successful_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=output_to_file,
catalog=catalog,
)
else:
print("Databroker is not setup for this beamline")
return
free_memory_from_handler()
return data


def make_hdf(
start,
end=None,
*,
fname=None,
wd=None,
fname_add_version=False,
completed_scans_only=False,
successful_scans_only=False,
file_overwrite_existing=False,
prefix="scan2D_",
create_each_det=False,
save_scaler=True,
num_end_lines_excluded=None,
skip_scan_types=None,
catalog_name=None,
):
"""
Load data from database and save it in HDF5 files.
Parameters
----------
start : int
Run ID (positive or negative int) or of the first scan to convert or Run UID
(str, full or short). If `start` is UID, then `end` must not be provided or set to None.
    end : int, optional
        Scan ID of the last scan to convert. If ``end`` is not specified or None, then
        only the scan with ID ``start`` is converted and an exception is raised if an
        error occurs during the conversion. If ``end`` is specified, then scans in the
        range ``start``..``end`` are converted and a scan in the sequence is skipped
        if there is an issue during the conversion. For example:

        .. code-block:: python

            make_hdf(2342)

        will process scan #2342 and throw an exception if an error occurs. On the other hand

        .. code-block:: python

            make_hdf(2342, 2342)

        will process scan #2342 and write data to a file if the conversion is successful;
        otherwise no file is created. The scans with IDs in the range 2342..2441 can be
        processed by calling

        .. code-block:: python

            make_hdf(2342, 2441)

        Scans with IDs in the specified range that do not exist in the database, or scans
        causing errors during conversion, will be skipped.
    fname : string, optional keyword parameter
        Path used to save the data file when ``end`` is ``None`` (only one scan is
        processed). The file name is created automatically if ``fname`` is not specified.
    wd : str
        Working directory; the file(s) will be created in this directory. The directory
        will be created if it does not exist. If ``wd`` is not specified, then the file(s)
        will be saved to the current directory.
    fname_add_version : bool, keyword parameter
        True: if the file already exists, then a version number is added to the file
        name so that it becomes unique in the current directory. The version number
        is added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.
        False: the conversion fails if the file already exists. If ``end`` is ``None``,
        an exception is raised. If ``end`` is specified, the scan is skipped and the
        next scan in the range is processed.
    completed_scans_only : bool, keyword parameter
        True: process only completed scans (for which the ``stop`` document exists in
        the database). A failed scan for which the ``stop`` document exists is considered
        completed, even if only part of the image was scanned. If an incomplete scan is
        encountered, an exception is raised (``end`` is not specified) or the scan
        is skipped (``end`` is specified). This feature makes it possible to use
        ``make_hdf`` as part of a script for real-time data analysis:

        .. code-block:: python

            # Wait time between retries in seconds. Select the value appropriate
            # for the workflow type.
            wait_time = 600  # Wait for 10 minutes between retries.

            for scan_id in range(n_start, n_start + n_scans):
                while True:
                    try:
                        # Load the scan if it is available
                        make_hdf(scan_id, completed_scans_only=True)
                        # Process the file using the prepared parameter file
                        pyxrf_batch(scan_id, param_file_name="some_parameter_file.json")
                        break
                    except Exception:
                        time.sleep(wait_time)

        Such scripts are currently used at the HXN and SRX beamlines of NSLS-II, so this
        feature supports the existing workflows.
        False: the feature is disabled; incomplete scans are processed.
    successful_scans_only : bool, keyword parameter
        Similar to ``completed_scans_only``. The file is created only if the ``stop``
        document exists and ``exit_status == 'success'``.
    file_overwrite_existing : bool, keyword parameter
        This option should be used if the existing file should be deleted and replaced
        with a new file with the same name. Use it with caution, since the existing
        file may contain processed data, which will be permanently deleted.
        True: overwrite existing files if needed. Note that if ``fname_add_version``
        is ``True``, then a new version of the existing file is always created.
        False: do not overwrite existing files. If the file already exists, then an
        exception is raised (loading a single scan) or the scan is skipped (loading
        a range of scans).
    prefix : str, optional
        Prefix for the name of the created data file. If ``fname`` is not specified,
        the name is generated automatically in the form ``<prefix><scanID>.h5``.
    create_each_det : bool, optional
        True: save data for each available detector channel into the file. Enabling this
        feature leads to larger data files. Inspection of data from individual channels
        of the detector may be helpful in evaluating the quality of the detector
        calibration, and it adds flexibility to data analysis. The feature may be
        disabled when a large number of routine scans recorded by a well-tested system
        are processed and disk space is an issue.
        False: disable the feature. Only the sum of all detector channels is saved
        to disk.
    save_scaler : bool, optional
        True: save scaler data in the data file.
        False: do not save scaler data.
    num_end_lines_excluded : int, optional
        The number of lines at the end of the scan that will not be saved to the data file.
    skip_scan_types : list(str) or None
        The list of plan types (e.g. ``['FlyPlan1D']``) that should cause the loader to
        raise an exception. The parameter allows scripts to ignore certain plan types
        when downloading data using ranges of scan IDs. (Supported only at HXN.)
    catalog_name : str or None
        Name of the catalog (e.g. ``"srx"``). The function attempts to determine the
        catalog name automatically if the parameter is not specified or ``None``.
"""
if wd:
# Create the directory
wd = os.path.expanduser(wd)
        wd = os.path.abspath(wd)  # 'os.makedirs' does not accept paths that contain '..'
os.makedirs(wd, exist_ok=True) # Does nothing if the directory already exists
if isinstance(start, str) or (end is None):
# Two cases: only one Run ID ('start') is provided or 'start' is Run UID.
# In both cases only one run is loaded.
if end is not None:
raise ValueError(r"Parameter 'end' must be None if run is loaded by UID")
        run_id, run_uid = fetch_run_info(start, catalog_name)  # This may raise RuntimeError
# Load one scan with ID specified by ``start``
# If there is a problem while reading the scan, the exception is raised.
if fname is None:
fname = prefix + str(run_id) + ".h5"
if wd:
fname = os.path.join(wd, fname)
fetch_data_from_db(
run_uid,
fpath=fname,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
successful_scans_only=successful_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=True,
save_scaler=save_scaler,
num_end_lines_excluded=num_end_lines_excluded,
skip_scan_types=skip_scan_types,
catalog_name=catalog_name,
)
else:
# Both ``start`` and ``end`` are specified. Convert the scans in the range
# ``start`` .. ``end``. If there is a problem reading the scan,
# then the scan is skipped and the next scan is processed
datalist = range(start, end + 1)
for v in datalist:
fname = prefix + str(v) + ".h5"
if wd:
fname = os.path.join(wd, fname)
try:
fetch_data_from_db(
v,
fpath=fname,
create_each_det=create_each_det,
fname_add_version=fname_add_version,
completed_scans_only=completed_scans_only,
successful_scans_only=successful_scans_only,
file_overwrite_existing=file_overwrite_existing,
output_to_file=True,
save_scaler=save_scaler,
num_end_lines_excluded=num_end_lines_excluded,
skip_scan_types=skip_scan_types,
catalog_name=catalog_name,
)
print(f"Scan #{v}: Conversion completed.\n")
except Exception as ex:
print(f"Scan #{v}: Can not complete the conversion")
print(f" ({ex})\n")


def _is_scan_complete(hdr):
    """
    Checks if the scan is complete (the 'stop' document exists).

    Parameters
    ----------
    hdr : databroker.core.Header
        Header of the run, e.g. ``hdr = db[scan_id]``. The header must be
        reloaded each time before the function is called.

    Returns
    -------
    True: scan is complete
    False: scan is incomplete (still running)
    """
# hdr.stop is an empty dictionary if the scan is incomplete
return bool(hdr.stop)


def _is_scan_successful(hdr):
    """
    Checks if the scan completed successfully: the 'stop' document exists
    and ``exit_status == 'success'``.
    """
return bool(hdr.stop) and hdr.stop["exit_status"] == "success"


def _extract_metadata_from_header(hdr):
    """
    Extract metadata from the start and stop documents. Metadata extracted from other
    documents in the scan is beamline-specific and is added to the dictionary at a later time.
    """
start_document = hdr.start
mdata = ScanMetadataXRF()
data_locations = {
"scan_id": ["scan_id"],
"scan_uid": ["uid"],
"scan_instrument_id": ["beamline_id"],
"scan_instrument_name": [],
"scan_end_station": [],
"scan_time_start": ["time"],
"scan_time_start_utc": ["time"],
"instrument_mono_incident_energy": ["beamline_status/energy", "scan/energy"],
"instrument_beam_current": [],
"instrument_detectors": ["detectors", "scan/detectors"],
"sample_name": ["sample/name", "sample", "scan/sample_name"],
"experiment_plan_name": ["plan_name"],
"experiment_plan_type": ["plan_type"],
"proposal_num": ["proposal/proposal_num"],
"proposal_title": ["proposal/proposal_title"],
"proposal_PI_lastname": ["proposal/PI_lastname"],
"proposal_saf_num": ["proposal/saf_num"],
"proposal_cycle": ["proposal/cycle"],
# Scan parameters
"param_type": ["scan/type"],
"param_input": ["scan/scan_input"],
"param_dwell": ["scan/dwell", "exposure_time"],
"param_snake": ["scan/snake"],
"param_shape": ["scan/shape", "shape"],
"param_theta": ["scan/theta/val"],
"param_theta_units": ["scan/theta/units"],
"param_delta": ["scan/delta/val"],
"param_delta_units": ["scan/delta/units"],
"param_fast_axis": ["scaninfo/fast_axis", "scan/fast_axis/motor_name"],
"param_fast_axis_units": ["scan/fast_axis/units"],
"param_slow_axis": ["scaninfo/slow_axis", "scan/slow_axis/motor_name"],
"param_slow_axis_units": ["scan/slow_axis/units"],
}
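    # Each location is a '/'-separated path into the start document, e.g. the
    # location "scan/energy" refers to start_document["scan"]["energy"].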
for key, locations in data_locations.items():
        # Go to the next key if no location is defined for the current key.
        # No location means that the data is not yet defined in the start document
        # of any beamline. Multiple locations point to locations at different beamlines.
        if not locations:
            continue
        # For each metadata key there may be no location, or one or multiple locations,
        # in the start document.
        for loc in locations:
            path = loc.split("/")
ref = start_document
for n, p in enumerate(path):
if n >= len(path) - 1:
break
# 'ref' must always point to dictionary
if not isinstance(ref, dict):
ref = None
break
if p in ref:
ref = ref[p]
else:
ref = None
break
# At this point 'ref' must be a dictionary
value = None
if ref is not None and isinstance(ref, dict):
if path[-1] in ref:
value = ref[path[-1]]
            # We have now arrived at the end of the path: 'value' must be a scalar or a list
if value is not None and not isinstance(value, dict):
if path[-1] == "time":
if key.endswith("_utc"):
value = convert_time_to_nexus_string(ttime.gmtime(value))
else:
value = convert_time_to_nexus_string(ttime.localtime(value))
mdata[key] = value
break
stop_document = hdr.stop
if stop_document:
if "time" in stop_document:
t = stop_document["time"]
mdata["scan_time_stop"] = convert_time_to_nexus_string(ttime.localtime(t))
mdata["scan_time_stop_utc"] = convert_time_to_nexus_string(ttime.gmtime(t))
if "exit_status" in stop_document:
mdata["scan_exit_status"] = stop_document["exit_status"]
else:
mdata["scan_exit_status"] = "incomplete"
# Add full beamline name (if available, otherwise don't create the entry).
# Also, don't overwrite the existing name if it was read from the start document
if "scan_instrument_id" in mdata and "scan_instrument_name" not in mdata:
instruments = {
"srx": "Submicron Resolution X-ray Spectroscopy",
"hxn": "Hard X-ray Nanoprobe",
"tes": "Tender Energy X-ray Absorption Spectroscopy",
"xfm": "X-ray Fluorescence Microprobe",
}
iname = instruments.get(mdata["scan_instrument_id"].lower(), "")
if iname:
mdata["scan_instrument_name"] = iname
return mdata


def _get_metadata_value_from_descriptor_document(hdr, *, data_key, stream_name="baseline"):
    """
    Returns the first occurrence of the variable with the name ``data_key`` in the
    specified document stream. Returns ``None`` if the variable is not found.
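
    Illustrative usage (this data key is the one used for HXN below):

    >>> v = _get_metadata_value_from_descriptor_document(
    ...     hdr, data_key="beamline_status_beam_current", stream_name="baseline"
    ... )  # doctest: +SKIP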
"""
value = None
docs = hdr.documents(stream_name=stream_name)
for name, doc in docs:
if (name != "event") or ("descriptor" not in doc):
continue
try:
value = doc["data"][data_key]
break # Don't go through the rest of the documents
except Exception:
pass
return value


def _get_metadata_all_from_descriptor_document(hdr, *, data_key, stream_name="baseline"):
    """
    Returns the list of recorded values of the variable with the name ``data_key`` in
    the specified document stream. Returns ``None`` if the variable is not found.
    """
value = []
docs = hdr.documents(stream_name=stream_name)
for name, doc in docs:
if (name != "event") or ("descriptor" not in doc):
continue
try:
value.append(doc["data"][data_key])
except Exception:
pass
value = value or None # Replace [] with None
return value


def _get_metadata_value_from_descriptor_document_tiled(hdr, *, data_key, stream_name="baseline"):
    """
    Returns the first occurrence of the variable with the name ``data_key`` in the
    specified document stream. Returns ``None`` if the variable is not found.
    """
value = None
try:
value = hdr[stream_name]["data"][data_key].compute()[0]
except Exception:
pass
return value


def _get_metadata_all_from_descriptor_document_tiled(hdr, *, data_key, stream_name="baseline"):
    """
    Returns the list of recorded values of the variable with the name ``data_key`` in
    the specified document stream. Returns ``None`` if the variable is not found.
    """
value = []
try:
value = list(hdr[stream_name]["data"][data_key].compute())
except Exception:
pass
return value or None # Replace [] with None


def map_data2D_hxn(
run_id_uid,
fpath,
create_each_det=False,
fname_add_version=False,
completed_scans_only=False,
successful_scans_only=False,
file_overwrite_existing=False,
output_to_file=True,
skip_scan_types=None,
):
"""
Save the data from databroker to hdf file.
.. note:: Requires the databroker package from NSLS2
Parameters
----------
run_id_uid : int
ID or UID of a run
fpath: str
path to save hdf file
create_each_det: bool, optional
Do not create data for each detector is data size is too large,
if set as false. This will slow down the speed of creating hdf file
with large data size.
fname_add_version : bool
True: if file already exists, then file version is added to the file name
so that it becomes unique in the current directory. The version is
added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.
False: then conversion fails.
completed_scans_only : bool
True: process only completed scans (for which ``stop`` document exists in
the database). Failed scan for which ``stop`` document exists are considered
completed even if not the whole image was scanned. If incomplete scan is
encountered: an exception is thrown.
False: the feature is disabled, incomplete scan will be processed.
file_overwrite_existing : bool, keyword parameter
This option should be used if the existing file should be deleted and replaced
with the new file with the same name. This option should be used with caution,
since the existing file may contain processed data, which will be permanently deleted.
True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,
then new versions of the existing file will always be created.
False: do not overwrite existing files. If the file already exists, then the exception
is raised.
output_to_file : bool, optional
save data to hdf5 file if True
"""
hdr = db[run_id_uid]
runid = hdr.start["scan_id"] # Replace with the true value (runid may be relative, such as -2)
logger.info(f"Loading scan #{runid}")
if completed_scans_only and not _is_scan_complete(hdr):
raise Exception("Scan is incomplete. Only completed scans are currently processed.")
if successful_scans_only and not _is_scan_successful(hdr):
raise Exception(
"Scan is not successfully completed. Only successfully completed scans are currently processed."
)
# Generate the default file name for the scan
if fpath is None:
fpath = f"scan2D_{runid}.h5"
# Output data is the list of data structures for all available detectors
data_output = []
start_doc = hdr["start"]
if "scan" in start_doc:
#print(" panda scan ")
plan_type = start_doc["scan"]["type"]
else:
plan_type = start_doc["plan_type"]
logger.info("Plan type: '%s'", plan_type)
# Exclude certain types of plans based on data from the start document
if isinstance(skip_scan_types, (list, tuple)) and (plan_type in skip_scan_types):
raise RuntimeError(
f"Failed to load the scan: plan type {plan_type!r} is in the list of skipped types"
)
# The dictionary holding scan metadata
mdata = _extract_metadata_from_header(hdr)
# Some metadata is located at specific places in the descriptor documents
# Search through the descriptor documents for the metadata
v = _get_metadata_value_from_descriptor_document(
hdr, data_key="beamline_status_beam_current", stream_name="baseline"
)
if v is not None:
mdata["instrument_beam_current"] = v
v = _get_metadata_value_from_descriptor_document(hdr, data_key="energy", stream_name="baseline")
if v is not None:
mdata["instrument_mono_incident_energy"] = v
# --------------------------------------------------------------------------------------------
# IDENTIFY END STATION AND SELECT THE APPROPRIATE THETA ANGLE FROM BASELINE
# Identify endstation
end_station = ""
es_motors = hdr.start["motors"]
motors_mll, motors_zp = ("dssx", "dssy", "dssz"), ("zpssx", "zpssy", "zpssz")
if es_motors[0] in motors_mll:
end_station = "MLL"
elif es_motors[0] in motors_zp:
end_station = "ZP"
else:
logger.warning("Failed to identify end station from data found in start document.")
if end_station:
mdata["scan_end_station"] = end_station
logger.info(f"Identified beamline end station: {end_station!r}")
    # Get theta angles (each scan has the angles for both endstations, but we need to pick one)
if end_station == "MLL":
theta = _get_metadata_value_from_descriptor_document(hdr, data_key="dsth", stream_name="baseline") # MLL
elif end_station == "ZP":
theta = _get_metadata_value_from_descriptor_document(hdr, data_key="zpsth", stream_name="baseline") # ZP
else:
theta = None
    # Add theta to the metadata
if theta is not None:
mdata["param_theta"] = round(theta * 1000) # Convert to mdeg (same as SRX)
mdata["param_theta_units"] = "mdeg"
theta = round(theta, 3) # Better presentation
else:
logger.warning("Angle 'theta' is not found and is not included in the HDF file metadata")
# ------------------------------------------------------------------------------------------------
# Dimensions of the scan
if "dimensions" in start_doc:
datashape = start_doc.dimensions
elif "shape" in start_doc:
datashape = start_doc.shape
elif "num_points" in start_doc:
datashape = [start_doc.num_points]
else:
logger.error("No dimension/shape is defined in hdr.start.")
n_dimensions = len(datashape)
pos_list = None
if "motors" in start_doc:
pos_list = start_doc.motors
elif "axes" in start_doc:
pos_list = start_doc.axes
if (pos_list is not None) and (n_dimensions == 1): # 1D scan
pname = pos_list[0]
if pname.lower().endswith("x"):
pos_list.append(pname[:-1] + "y")
else:
pos_list.append(pname[:-1] + "x")
if pos_list is None:
pos_list = ["zpssx[um]", "zpssy[um]"]
if n_dimensions == 1:
if ("y" in pos_list[0]) or ("x" in pos_list[1]):
datashape = [datashape[0], 1]
else:
datashape = [1, datashape[0]]
mdata["param_shape"] = [datashape[0], 1]
elif n_dimensions == 2:
if start_doc.plan_name == "grid_scan":
datashape = [datashape[0], datashape[1]]
else:
datashape = [datashape[1], datashape[0]]
else:
raise ValueError(f"Invalid data shape: {datashape}. Must be a list with 1 or 2 elements.")
    logger.info(f"Data shape: {datashape}.")
# -----------------------------------------------------------------------------------------------
# Determine fast axis and slow axis
fast_axis, slow_axis, fast_axis_index = start_doc.get("fast_axis", None), None, None
motors = start_doc.get("motors", None)
if motors and isinstance(motors, (list, tuple)) and len(motors) == 1:
fast_axis = fast_axis if fast_axis else motors[0]
fast_axis_index = motors.index(fast_axis, 0)
elif motors and isinstance(motors, (list, tuple)) and len(motors) == 2:
fast_axis = fast_axis if fast_axis else motors[0]
fast_axis_index = motors.index(fast_axis, 0)
slow_axis_index = 0 if (fast_axis_index == 1) else 1
slow_axis = motors[slow_axis_index]
elif n_dimensions == 1:
fast_axis, fast_axis_index, slow_axis = pos_list[0], 0, pos_list[1]
if fast_axis:
mdata["param_fast_axis"] = fast_axis
if slow_axis:
mdata["param_slow_axis"] = slow_axis
# -----------------------------------------------------------------------------------------------
# Reconstruct scan input
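    # The reconstructed value has the form (keys as in 'plan_args' below):
    #     [scan_start1, scan_end1, num1, scan_start2, scan_end2, num2, exposure_time]
    # e.g. param_input = [-15, 15, 61, -15, 15, 61, 0.05] (illustrative values)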
try:
if "plan_args" in start_doc: # dscan and fly1d/fly2d scan
plan_args = start_doc["plan_args"]
# px_motor = plan_args["motor1"]
px_start, px_end, px_step = plan_args["scan_start1"], plan_args["scan_end1"], plan_args["num1"]
# py_motor = plan_args["motor2"]
py_start, py_end, py_step = plan_args["scan_start2"], plan_args["scan_end2"], plan_args["num2"]
dwell_time = plan_args["exposure_time"]
param_input = [px_start, px_end, px_step, py_start, py_end, py_step, dwell_time]
mdata["param_input"] = param_input
elif "scan" in start_doc: # fly1dpd and fly2dpd scan
scan_input = start_doc["scan"]["scan_input"]
px_start, px_end, px_step = scan_input[0:3]
py_start, py_end, py_step = scan_input[3:6]
dwell_time = start_doc["scan"]["dwell"]
param_input = [px_start, px_end, px_step, py_start, py_end, py_step, dwell_time]
mdata["param_input"] = param_input
else:
raise Exception("Unknown scan plan type")
except Exception as ex:
logger.warning(
"Failed to reconstruct scan input: %s. Scan input is not saved as part of metadata to HDF5 file",
str(ex),
)
# -------------------------------------------------------------------------------------------------
fly_type = start_doc.get("fly_type", None)
subscan_dims = start_doc.get("subscan_dims", None)
current_dir = os.path.dirname(os.path.realpath(__file__))
config_file = "hxn_pv_config.json"
config_path = sep_v.join(current_dir.split(sep_v)[:-2] + ["configs", config_file])
with open(config_path, "r") as json_data:
config_data = json.load(json_data)
keylist = hdr.descriptors[0].data_keys.keys()
    det_list = [v for v in keylist if "xspress3" in v]  # Find xspress3 detectors by keyword matching
    det_list = [v for v in det_list if len(v) == 12]  # Filter out other ROIs added by the user
scaler_list_all = config_data["scaler_list"]
all_keys = hdr.descriptors[0].data_keys.keys()
scaler_list = [v for v in scaler_list_all if v in all_keys]
fields = det_list + scaler_list + pos_list
    # Do not supply 'fields' if Databroker V0 is used
if isinstance(db, databroker._core.Broker):
fields = None
data = hdr.table(fields=fields, fill=False) # HXN data is stored in h5 files, load them later in map_data2D.
    # This is for the case of 'dscan' (1D), where the slow-axis positions are not saved
if (slow_axis not in data) and (fast_axis in data):
data[slow_axis] = np.zeros(shape=data[fast_axis].shape)
data_out = map_data2D(
data,
datashape,
det_list=det_list,
pos_list=pos_list,
scaler_list=scaler_list,
create_each_det=create_each_det,
fly_type=fly_type,
subscan_dims=subscan_dims,
spectrum_len=4096,
        hdr=hdr,
)
# Transform coordinates for the fast axis if necessary:
# Flip the direction of the fast axis for certain angles
if (theta is not None) and fast_axis.lower().endswith("z") and (theta < 0):
logger.info(f"Fast axis: {fast_axis!r}. Angle 'theta': {theta}. Flipping data along the fast axis ...")
data_out["pos_data"][fast_axis_index, :, :] = np.fliplr(data_out["pos_data"][fast_axis_index, :, :])
data_out["scaler_data"] = np.flip(data_out["scaler_data"], axis=1)
data_out["det_sum"] = np.flip(data_out["det_sum"], axis=1)
        for k in data_out.keys():
            if re.search(r"^det[\d]+$", k):  # Individual detectors such as 'det1', 'det2', etc.
                data_out[k] = np.flip(data_out[k], axis=1)
else: