diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 398ce9c49319e1b96243f2b4d8cb0949eca08938..8d218c5fa97e742f6b317fe7269ec38ab6446c1f 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -68,7 +68,7 @@ automated_test:
     - python3 -m pip install ".[automated_test]"
     - echo "Running automated test. This can take sometime to finish depending on the test data."
     - echo "Given variables are REFERENCE=$REFERENCE, OUTPUT=$OUTPUT, DETECTOR=$DETECTORS, CALIBRATION=$CALIBRATION"
-    - python3 -m pytest ./tests/test_reference_runs --color yes --verbose --release-test --reference-folder /gpfs/exfel/data/scratch/xcaltst/test/$REFERENCE --out-folder /gpfs/exfel/data/scratch/xcaltst/test/$OUTPUT --detectors $DETECTORS --calibration $CALIBRATION
+    - python3 -m pytest ./tests/test_reference_runs --color yes --verbose --release-test --reference-folder /gpfs/exfel/data/scratch/xcaltst/test/$REFERENCE --out-folder /gpfs/exfel/data/scratch/xcaltst/test/$OUTPUT --detectors $DETECTORS --calibration $CALIBRATION --find-difference
   timeout: 24 hours
 
 cython-editable-install-test:
diff --git a/notebooks/REMI/REMI_Digitize_and_Transform.ipynb b/notebooks/REMI/REMI_Digitize_and_Transform.ipynb
index ddb1c0c17976f3aaa87928d0aa2d34728949bbd1..fc1ac99babe5143276436c865e383dc0b67237b4 100644
--- a/notebooks/REMI/REMI_Digitize_and_Transform.ipynb
+++ b/notebooks/REMI/REMI_Digitize_and_Transform.ipynb
@@ -412,32 +412,59 @@
     "    train_triggers['fel'] = [pos in fel_pos for pos in all_pos]\n",
     "    train_triggers['ppl'] = [pos in ppl_pos for pos in all_pos]\n",
     "\n",
-    "with timing('find_triggers'):\n",
-    "    psh.map(trigger_by_ppt, ppt_data)\n",
     "    \n",
-    "if (np.unique(triggers['pulse'][1:] - triggers['pulse'][:-1]) > 0).sum() > 1:\n",
-    "    # There is more than one delta between pulse entries across all pulses. This is not\n",
-    "    # necessarily a problem, as the pattern could simply have changed in between trains\n",
-    "    # with each train being split properly.\n",
-    "    # If there's more than one delta in a single train, this likely points to a mismatch\n",
-    "    # of FEL and PPL repetition rate. This is most likely not intended.\n",
+    "if ignore_fel and ignore_ppl:\n",
+    "    # Both FEL and PPL are ignored, use a virtual trigger covering the full train.\n",
+    "    print('WARNING: Both FEL and PPL pulses are ignored, '\n",
+    "          'a virtual trigger covering the entire train is inserted')\n",
     "    \n",
-    "    one = np.uint64(1)  # Because np.uint64 + int = np.float64\n",
-    "    pulse_deltas = set()\n",
-    "\n",
-    "    for pulse_id, (offset, count) in enumerate(zip(pulse_offsets, pulse_counts)):\n",
-    "        deltas = triggers['pulse'][offset+one:offset+count] - triggers['pulse'][offset:offset+count-one]\n",
-    "\n",
-    "        if len(np.unique(deltas)) > 1:\n",
-    "            for delta in deltas:\n",
-    "                pulse_deltas.add(delta)\n",
-    "\n",
-    "    if len(pulse_deltas) > 1:\n",
-    "        delta_str = ', '.join([str(x) for x in sorted(pulse_deltas)])\n",
-    "        warning(f'Different pulse lengths (PPT: {delta_str}) encountered within single trains, '\n",
-    "                f'separated pulse spectra may split up signals!')\n",
-    "    else:\n",
-    "        warning('Different pulse lengths encountered across trains, separation may be unstable!')"
+    "    # Overwrite the global pulse statistics computed before.\n",
+    "    num_pulses = len(dc.train_ids)\n",
+    "    pulse_counts[:] = 1\n",
+    "    pulse_counts = pulse_counts.astype(np.int32)\n",
+    "    pulse_offsets = np.arange(len(pulse_counts)).astype(np.int32)\n",
+    "\n",
+    "    # Obtain the minimal trace length across all detector channels.\n",
+    "    min_trace_len = min([\n",
+    "        dc[src, key].entry_shape[0]\n",
+    "        for det_name in remi['detector'].keys()\n",
+    "        for src, key in remi.get_detector_sourcekeys(det_name)\n",
+    "    ])\n",
+    "\n",
+    "    triggers['start'] = first_pulse_offset\n",
+    "    triggers['stop'] = min_trace_len\n",
+    "    triggers['offset'] = 0.0\n",
+    "    triggers['pulse'] = -1\n",
+    "    triggers['fel'] = False\n",
+    "    triggers['ppl'] = False\n",
+    "    \n",
+    "else:\n",
+    "    with timing('find_triggers'):\n",
+    "        psh.map(trigger_by_ppt, ppt_data)\n",
+    "    \n",
+    "    if (np.unique(triggers['pulse'][1:] - triggers['pulse'][:-1]) > 0).sum() > 1:\n",
+    "        # There is more than one delta between pulse entries across all pulses. This is not\n",
+    "        # necessarily a problem, as the pattern could simply have changed in between trains\n",
+    "        # with each train being split properly.\n",
+    "        # If there's more than one delta in a single train, this likely points to a mismatch\n",
+    "        # of FEL and PPL repetition rate. This is most likely not intended.\n",
+    "\n",
+    "        one = np.uint64(1)  # Because np.uint64 + int = np.float64\n",
+    "        pulse_deltas = set()\n",
+    "\n",
+    "        for pulse_id, (offset, count) in enumerate(zip(pulse_offsets, pulse_counts)):\n",
+    "            deltas = triggers['pulse'][offset+one:offset+count] - triggers['pulse'][offset:offset+count-one]\n",
+    "\n",
+    "            if len(np.unique(deltas)) > 1:\n",
+    "                for delta in deltas:\n",
+    "                    pulse_deltas.add(delta)\n",
+    "\n",
+    "        if len(pulse_deltas) > 1:\n",
+    "            delta_str = ', '.join([str(x) for x in sorted(pulse_deltas)])\n",
+    "            warning(f'Different pulse lengths (PPT: {delta_str}) encountered within single trains, '\n",
+    "                    f'separated pulse spectra may split up signals!')\n",
+    "        else:\n",
+    "            warning('Different pulse lengths encountered across trains, separation may be unstable!')"
    ]
   },
   {
@@ -878,7 +905,7 @@
     "max_num_hits = 0.0\n",
     " \n",
     "for det_name in remi['detector'].keys():\n",
-    "    agg_window = num_pulses // 1000\n",
+    "    agg_window = num_pulses // min(1000, num_pulses)\n",
     "    \n",
     "    num_hits = np.isfinite(det_data[det_name]['hits']['x']).sum(axis=1)\n",
     "    num_hits = num_hits[:(len(num_hits) // agg_window) * agg_window]\n",
@@ -1127,6 +1154,7 @@
     "    \n",
     "    for det_name in remi['detector']:\n",
     "        cur_device_id = det_device_id.format(karabo_id=karabo_id, det_name=det_name.upper())\n",
+    "        cur_max_hits = remi['detector'][det_name]['max_hits']  # upper bound for the hit-axis chunk size\n",
     "        \n",
     "        cur_control_data = outp.create_control_source(cur_device_id)\n",
     "        # Manually manipulate the file here, still creates the index properly.\n",
@@ -1139,23 +1167,36 @@
     "        \n",
     "        if save_raw_triggers:\n",
     "            cur_fast_data.create_key('raw.triggers', triggers[pulse_mask],\n",
+    "                                     maxshape=(None,) + triggers.shape[1:],\n",
     "                                     chunks=tuple(chunks_triggers), **dataset_kwargs)\n",
     "        \n",
     "        if save_raw_edges:\n",
     "            cur_fast_data.create_key('raw.edges', cur_data['edges'][pulse_mask],\n",
-    "                                     chunks=tuple(chunks_edges), **dataset_kwargs)\n",
+    "                                     maxshape=(None,) + cur_data['edges'].shape[1:],\n",
+    "                                     chunks=tuple(chunks_edges if chunks_edges[-1] <= cur_max_hits\n",
+    "                                                  else chunks_edges[:-1] + [cur_max_hits]),\n",
+    "                                     **dataset_kwargs)\n",
     "        \n",
     "        if save_raw_amplitudes:\n",
     "            cur_fast_data.create_key('raw.amplitudes', cur_data['amplitudes'][pulse_mask],\n",
-    "                                     chunks=tuple(chunks_amplitudes), **dataset_kwargs)\n",
+    "                                     maxshape=(None,) + cur_data['amplitudes'].shape[1:],\n",
+    "                                     chunks=tuple(chunks_amplitudes if chunks_amplitudes[-1] <= cur_max_hits\n",
+    "                                                  else chunks_amplitudes[:-1] + [cur_max_hits]),\n",
+    "                                     **dataset_kwargs)\n",
     "        \n",
     "        if save_rec_signals:\n",
     "            cur_fast_data.create_key('rec.signals', cur_data['signals'][pulse_mask],\n",
-    "                                     chunks=tuple(chunks_signals), **dataset_kwargs)\n",
+    "                                     maxshape=(None,) + cur_data['signals'].shape[1:],\n",
+    "                                     chunks=tuple(chunks_signals if chunks_signals[-1] <= cur_max_hits\n",
+    "                                                  else chunks_signals[:-1] + [cur_max_hits]),\n",
+    "                                     **dataset_kwargs)\n",
     "        \n",
     "        if save_rec_hits:\n",
     "            cur_fast_data.create_key('rec.hits', cur_data['hits'][pulse_mask],\n",
-    "                                     chunks=tuple(chunks_hits), **dataset_kwargs)\n",
+    "                                     maxshape=(None,) + cur_data['hits'].shape[1:],\n",
+    "                                     chunks=tuple(chunks_hits if chunks_hits[-1] <= cur_max_hits\n",
+    "                                                  else chunks_hits[:-1] + [cur_max_hits]),\n",
+    "                                     **dataset_kwargs)\n",
     "        \n",
     "        cur_fast_data.create_index(raw=pulse_counts[train_mask], rec=pulse_counts[train_mask])\n",
     "        \n",
diff --git a/notebooks/ePix100/Characterize_Darks_ePix100_NBC.ipynb b/notebooks/ePix100/Characterize_Darks_ePix100_NBC.ipynb
index 3d2da0b11e8dfbcd569c1a9385612fbdba185452..cfaf022060c1028f47fccfaa3c13931751f6aa77 100644
--- a/notebooks/ePix100/Characterize_Darks_ePix100_NBC.ipynb
+++ b/notebooks/ePix100/Characterize_Darks_ePix100_NBC.ipynb
@@ -27,14 +27,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "in_folder = '/gpfs/exfel/exp/HED/202330/p900338/raw' # input folder, required\n",
+    "in_folder = '/gpfs/exfel/exp/MID/202330/p900329/raw' # input folder, required\n",
     "out_folder = '' # output folder, required\n",
     "metadata_folder = '' # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
     "sequence = 0 # sequence file to use\n",
-    "run = 176 # which run to read data from, required\n",
+    "run = 106 # which run to read data from, required\n",
     "\n",
     "# Parameters for accessing the raw data.\n",
-    "karabo_id = \"HED_IA1_EPX100-1\" # karabo karabo_id\n",
+    "karabo_id = \"MID_EXP_EPIX-1\" # karabo karabo_id\n",
     "karabo_da = [\"EPIX01\"] # data aggregators\n",
     "receiver_template = \"RECEIVER\" # detector receiver template for accessing raw data files\n",
     "path_template = 'RAW-R{:04d}-{}-S{{:05d}}.h5' # the template to use to access data\n",
diff --git a/notebooks/ePix100/Correction_ePix100_NBC.ipynb b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
index 9585d4eb66c9f7ff5d876bef40992ec95d9e3120..89fa139226537207b3b447893c3a26c57333ccb5 100644
--- a/notebooks/ePix100/Correction_ePix100_NBC.ipynb
+++ b/notebooks/ePix100/Correction_ePix100_NBC.ipynb
@@ -24,15 +24,15 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "in_folder = \"/gpfs/exfel/exp/MID/202301/p003346/raw\" # input folder, required\n",
+    "in_folder = \"/gpfs/exfel/exp/HED/202102/p002739/raw\" # input folder, required\n",
     "out_folder = \"\" # output folder, required\n",
     "metadata_folder = \"\" # Directory containing calibration_metadata.yml when run by xfel-calibrate\n",
     "sequences = [-1] # sequences to correct, set to -1 for all, range allowed\n",
     "sequences_per_node = 1 # number of sequence files per cluster node if run as slurm job, set to 0 to not run SLURM parallel\n",
-    "run = 55 # which run to read data from, required\n",
+    "run = 38 # which run to read data from, required\n",
     "\n",
     "# Parameters for accessing the raw data.\n",
-    "karabo_id = \"MID_EXP_EPIX-1\" # karabo karabo_id\n",
+    "karabo_id = \"HED_IA1_EPX100-1\" # karabo karabo_id\n",
     "karabo_da = \"EPIX01\" # data aggregators\n",
     "db_module = \"\" # module id in the database\n",
     "receiver_template = \"RECEIVER\" # detector receiver template for accessing raw data files\n",
@@ -612,19 +612,6 @@
     "    # Create count/first datasets at INDEX source.\n",
     "    outp_source.create_index(data=image_counts)\n",
     "\n",
-    "    # Store uncorrected RAW image datasets for the corrected trains.\n",
-    "\n",
-    "    data_raw_fields = [  # /data/\n",
-    "        \"ambTemp\", \"analogCurr\", \"analogInputVolt\", \"backTemp\",\n",
-    "        \"digitalInputVolt\", \"guardCurr\", \"relHumidity\", \"digitalCurr\"\n",
-    "    ]\n",
-    "    for field in data_raw_fields:\n",
-    "        field_arr = seq_dc[instrument_src, f\"data.{field}\"].ndarray()\n",
-    "\n",
-    "        outp_source.create_key(\n",
-    "            f\"data.{field}\", data=field_arr,\n",
-    "            chunks=(chunk_size_idim, *field_arr.shape[1:]))\n",
-    "\n",
     "    image_raw_fields = [  # /data/image/\n",
     "        \"binning\", \"bitsPerPixel\", \"dimTypes\", \"dims\",\n",
     "        \"encoding\", \"flipX\", \"flipY\", \"roiOffsets\", \"rotation\",\n",
@@ -641,6 +628,11 @@
     "        \"data.image.pixels\", data=data, chunks=dataset_chunk)\n",
     "    outp_source.create_key(\n",
     "        \"data.trainId\", data=seq_dc.train_ids, chunks=min(50, len(seq_dc.train_ids)))\n",
+    "    \n",
+    "    if 'data.pulseId' in seq_dc[instrument_src].keys():  # some runs are missing 'data.pulseId'\n",
+    "        outp_source.create_key(\n",
+    "            \"data.pulseId\", data=seq_dc[instrument_src]['data.pulseId'].ndarray().squeeze(), chunks=min(50, len(seq_dc.train_ids)))\n",
+    "    \n",
     "    if pattern_classification:\n",
     "        # Add main corrected `data.image.pixels` dataset and store corrected data.\n",
     "        outp_source.create_key(\n",
diff --git a/src/cal_tools/epix100/epix100lib.py b/src/cal_tools/epix100/epix100lib.py
index 9cea56634612b1cc3ab38e9a49ba6210ae05cb64..d2c71e1b912b5fe5137131f21b4ad38f3298edff 100644
--- a/src/cal_tools/epix100/epix100lib.py
+++ b/src/cal_tools/epix100/epix100lib.py
@@ -23,12 +23,24 @@ class epix100Ctrl():
 
     def get_temprature(self):
         """Get temperature value from CONTROL.
-        Temprature is stored in Celsius/100 units.
-        Therefore, we are dividing by 100 and
-        there is an absolute tolerance of 100.
-        atol=100 is a 1 degree variation tolerance.
+        atol is the temperature variation tolerance in degrees.
         """
-        # data.backTemp shape evolved from (n_trains,) to (n_trains, 1)
-        return self.run_dc[
-            self.instrument_src, 'data.backTemp'].as_single_value(
-                reduce_by='mean', atol=100).item() / 100
+        # Old receiver device configuration:
+        # the temperature was stored in
+        #   source: 'MID_EXP_EPIX-1/DET/RECEIVER:daqOutput'
+        #   key: 'data.backTemp'
+        if 'data.backTemp' in self.run_dc[self.instrument_src]:
+            # using `item()` because data.backTemp shape evolved from (n_trains,) to (n_trains, 1)
+            # atol = 100 because the temperature was stored in Celsius/100 units
+            return self.run_dc[
+                self.instrument_src, 'data.backTemp'].as_single_value(
+                    reduce_by='mean', atol=100).item() / 100
+
+        # New (2023) receiver device configuration:
+        # the temperature is stored in
+        #   source: 'MID_EXP_EPIX-1/DET/RECEIVER'
+        #   key: 'slowdata.backTemp.value'
+        else:
+            return self.run_dc[
+                self.instrument_src.split(':daqOutput')[0], 'slowdata.backTemp.value'].as_single_value(
+                    reduce_by='mean', atol=1)
\ No newline at end of file
diff --git a/webservice/templates/last_characterizations.html b/webservice/templates/last_characterizations.html
index 1a9b893740aabbe1a7ffe80f26668a9e9276e18e..d874bff9d53767358c729154335d0f2e5c92d396 100644
--- a/webservice/templates/last_characterizations.html
+++ b/webservice/templates/last_characterizations.html
@@ -4,7 +4,12 @@
 <h3>{{ instrument }}</h3>
 <dl>
     <dt>Requested:</dt><dd>{{ data['requested'] }}</dd>
-    <dt>Check in DB:</dt><dd><a href="https://in.xfel.eu/calibration/admin/calibration_constant_version?model_name=calibration_constant_version&utf8=%E2%9C%93&f%5Bphysical_device%5D%5B02524%5D%5Bo%5D=like&f%5Bphysical_device%5D%5B02524%5D%5Bv%5D={{ data['device_type'] }}&f%5Bcalibration_constant%5D%5B02697%5D%5Bo%5D=like&f%5Bcalibration_constant%5D%5B02697%5D%5Bv%5D=Offset&query=" target="_blank"> Open in calDB </a></dd>
+    <dt>Check in DB:</dt><dd>
+      {% for pdf in data['pdfs'] %}
+        <a href="https://in.xfel.eu/calibration/reports/by_file/{{ pdf[1] }}" target="_blank">Open in calDB</a>
+      {% endfor %}
+
+    </dd>
     <dt>Output path:</dt><dd>{{ data['out_path'] }}</dd>
     <dt>Input path:</dt><dd>{{ data['in_path'] }}</dd>
     <dt>Input runs:</dt><dd>{{ data['runs'] }}</dd>