diff --git a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
index 442f0d65e6b338125d96a4eb47fce961ca45b3f5..5ef0c5b28f63a598a4809547edaf703e47c9eeb5 100644
--- a/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
+++ b/notebooks/AGIPD/AGIPD_Correct_and_Verify.ipynb
@@ -89,6 +89,9 @@
     "low_medium_gap = False # 5 sigma separation in thresholding between low and medium gain\n",
     "round_photons = False  # Round to absolute number of photons, only use with gain corrections\n",
     "\n",
+    "# Additional processing\n",
+    "count_lit_pixels = False # Count the number of pixels registering photons\n",
+    "\n",
     "# Optional auxiliary devices\n",
     "use_ppu_device = ''  # Device ID for a pulse picker device to only process picked trains, empty string to disable\n",
     "ppu_train_offset = 0  # When using the pulse picker, offset between the PPU's sequence start and actually picked train\n",
@@ -259,6 +262,7 @@
     "    corr_bools[\"mask_zero_std\"] = mask_zero_std\n",
     "    corr_bools[\"low_medium_gap\"] = low_medium_gap\n",
     "    corr_bools[\"round_photons\"] = round_photons\n",
+    "    corr_bools[\"count_lit_pixels\"] = count_lit_pixels\n",
     "\n",
     "# Many corrections don't apply to fixed gain mode; will explicitly disable later if detected\n",
     "disable_for_fixed_gain = [\n",
@@ -532,7 +536,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "if round_photons and photon_energy <= 0.0:\n",
+    "photon_energy_warn = None\n",
+    "if photon_energy <= 0.0:\n",
     "    if use_xgm_device:\n",
     "        # Try to obtain photon energy from XGM device.\n",
     "        wavelength_data = dc[use_xgm_device, 'pulseEnergy.wavelengthUsed']\n",
@@ -544,18 +549,57 @@
     "            photon_energy = (h * c / e) / (wavelength_data.as_single_value(rtol=1e-2) * 1e-6)\n",
     "            print(f'Obtained photon energy {photon_energy:.3f} keV from {use_xgm_device}')\n",
     "        except ValueError:\n",
-    "            warning('XGM source available but photon energy varies greater than 1%, '\n",
-    "                 'photon rounding disabled!')\n",
-    "            round_photons = False\n",
+    "            photon_energy_warn = 'XGM source available but photon energy varies greater than 1%'\n",
     "    else:\n",
-    "        warning('Neither explicit photon energy nor XGM device configured, photon rounding disabled!')\n",
+    "        photon_energy_warn = 'Neither explicit photon energy nor XGM device configured'\n",
+    "\n",
+    "rounding_threshold_warn = None\n",
+    "if rounding_threshold <= .0 or 1. <= rounding_threshold:\n",
+    "    rounding_threshold_warn = 'Round threshold is out of (0, 1) range. Use standard 0.5 value.'\n",
+    "    rounding_threshold = 0.5\n",
+    "\n",
+    "if round_photons:\n",
+    "    if photon_energy_warn:\n",
+    "        warning(photon_energy_warn + ', photon rounding disabled!')\n",
     "        round_photons = False\n",
-    "elif round_photons:\n",
-    "    print(f'Photon energy for rounding: {photon_energy:.3f} keV')\n",
+    "    else:\n",
+    "        print(f'Photon energy for rounding: {photon_energy:.3f} keV')\n",
+    "        if rounding_threshold_warn:\n",
+    "            warning(rounding_threshold_warn)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if round_photons:\n",
+    "    data_units = 'photon'\n",
+    "    litpx_threshold = 1.\n",
+    "    if not xray_gain:\n",
+    "        litpx_threshold *= 7.\n",
+    "else:\n",
+    "    data_units = 'keV'\n",
+    "    if photon_energy_warn:\n",
+    "        warning(photon_energy_warn, '. Use 12 keV for lit-pixel counter threshold')\n",
+    "        litpx_threshold = 12.\n",
+    "    else:\n",
+    "        litpx_threshold = photon_energy\n",
+    "\n",
+    "    if rounding_threshold_warn:\n",
+    "        warning(rounding_threshold_warn)\n",
+    "\n",
+    "if not xray_gain:\n",
+    "    # convert photon energy to ADU (for AGIPD approx. 1 keV = 7 ADU)\n",
+    "    # it looks that rounding to photons can be applied to data in ADU as well\n",
+    "    litpx_threshold *= 7.\n",
+    "    data_units = 'ADU'\n",
     "\n",
-    "if round_photons and (rounding_threshold <= .0 or 1. <= rounding_threshold):\n",
-    "    warning('Round threshould is out of (0, 1) range. Use standard 0.5 value.')\n",
-    "    rounding_threshold = 0.5"
+    "litpx_threshold *= rounding_threshold\n",
+    "\n",
+    "if count_lit_pixels:\n",
+    "    print(f\"Count lit-pixels with signal above {litpx_threshold:.3g} {data_units}\")"
    ]
   },
   {
@@ -588,6 +632,7 @@
     "agipd_corr.photon_energy = photon_energy\n",
     "agipd_corr.rounding_threshold = rounding_threshold\n",
     "agipd_corr.cs_mg_adjust = cs_mg_adjust\n",
+    "agipd_corr.litpx_threshold = litpx_threshold\n",
     "\n",
     "agipd_corr.compress_fields = compress_fields\n",
     "if recast_image_data:\n",
@@ -906,6 +951,11 @@
     "        # Perform image-wise correction\"\n",
     "        pool.starmap(agipd_corr.gain_correction, imagewise_chunks(img_counts))\n",
     "        step_timer.done_step(\"Gain corrections\")\n",
+    "        \n",
+    "        # Peform additional processing\n",
+    "        if count_lit_pixels:\n",
+    "            pool.starmap(agipd_corr.litpixel_counter, imagewise_chunks(img_counts))\n",
+    "            step_timer.done_step(\"Lit-pixel counting\")\n",
     "\n",
     "        # Save corrected data\n",
     "        pool.starmap(agipd_corr.write_file, [\n",
diff --git a/src/cal_tools/agipdlib.py b/src/cal_tools/agipdlib.py
index 05662d4fa126cca01f9f213668739e99d0b9c62c..f52125bbb9d41df63b10e94929d1669d348e6dda 100644
--- a/src/cal_tools/agipdlib.py
+++ b/src/cal_tools/agipdlib.py
@@ -12,7 +12,12 @@ import numpy as np
 import sharedmem
 from dateutil import parser
 from extra_data import (
-    DataCollection, H5File, RunDirectory, by_id, PropertyNameError, SourceNameError,
+    DataCollection,
+    H5File,
+    PropertyNameError,
+    RunDirectory,
+    SourceNameError,
+    by_id,
 )
 
 from cal_tools import agipdalgs as calgs
@@ -28,7 +33,6 @@ from cal_tools.agipdutils import (
     melt_snowy_pixels,
 )
 from cal_tools.enums import AgipdGainMode, BadPixels, SnowResolution
-from logging import warning
 
 
 @dataclass
@@ -72,7 +76,7 @@ class AgipdCtrl:
         else:
             raise ValueError(f"No raw images found for {self.image_src}")
 
-        return ncell 
+        return ncell
 
     def get_num_cells(self) -> int:
         """Read number of memory cells from fast data."""
@@ -83,7 +87,7 @@ class AgipdCtrl:
             # the function returns wrong value.
             ncell = self._get_num_cells_instr()
 
-        return ncell 
+        return ncell
 
     def _get_acq_rate_ctrl(self) -> Optional[float]:
         """Get acquisition (repetition) rate from CONTROL source."""
@@ -582,6 +586,7 @@ class AgipdCorrections:
         self.photon_energy = 9.2
         self.rounding_threshold = 0.5
         self.cs_mg_adjust = 7e3
+        self.litpx_threshold = 42
 
         # Output parameters
         self.compress_fields = ['gain', 'mask']
@@ -614,7 +619,8 @@ class AgipdCorrections:
                           'zero_orange', 'force_hg_if_below',
                           'force_mg_if_below', 'mask_noisy_adc',
                           'melt_snow', 'common_mode', 'mask_zero_std',
-                          'low_medium_gap', 'round_photons']
+                          'low_medium_gap', 'round_photons',
+                          'count_lit_pixels']
 
         if set(corr_bools).issubset(tot_corr_bools):
             self.corr_bools = corr_bools
@@ -764,9 +770,9 @@ class AgipdCorrections:
         seqno = int(tokens[3][1:])
 
         agipd_base = self.h5_data_path.format(modno)
-        karabo_id, _, channel = agipd_base.split('/')
-        channel = channel.partition(":")[0] + ":output"
-        agipd_corr_source = f"{karabo_id}/CORR/{channel}"
+        karabo_id, _, det_channel = agipd_base.split('/')
+        det_channel = det_channel.partition(":")[0] + ":output"
+        agipd_corr_source = f"{karabo_id}/CORR/{det_channel}"
 
         instrument_channels = [f"{agipd_corr_source}/image"]
 
@@ -774,13 +780,23 @@ class AgipdCorrections:
         instrument_channels.append(f"{agipd_base}/image")
         # backward compatibility END
 
+        # create metadata: lit-pixel counter
+        if self.corr_bools.get("count_lit_pixels"):
+            litpx_output_fields = [
+                "cellId", "pulseId", "trainId",
+                "litPixels", "unmaskedPixels", "totalIntensity"
+            ]
+            litpx_index_group = "litpx"
+            litpx_src_name = agipd_corr_source
+            instrument_channels.append(f"{litpx_src_name}/{litpx_index_group}")
+
         with DataFile.from_details(out_folder, agg, runno, seqno) as outfile:
             outfile.create_metadata(
                 like=dc, instrument_channels=instrument_channels)
             outfile.create_index(trains, from_file=dc.files[0])
 
             # All corrected data goes in a /INSTRUMENT/.../image group
-            agipd_src = outfile.create_instrument_source(agipd_corr_source)
+            agipd_src = outfile.require_instrument_source(agipd_corr_source)
             agipd_src.create_index(image=count)
             image_grp = agipd_src.require_group("image")
 
@@ -813,6 +829,15 @@ class AgipdCorrections:
                     field, shape=arr.shape, dtype=arr.dtype, **kw
                 )
 
+            # create schema: lit-pixel counter
+            if self.corr_bools.get("count_lit_pixels"):
+                litpx_src = outfile.require_instrument_source(litpx_src_name)
+                litpx_src.create_index(**{litpx_index_group: count})
+                litpx_group = litpx_src.require_group(litpx_index_group)
+                for field in litpx_output_fields:
+                    litpx_group.create_dataset(
+                        field, shape=(n_img,), dtype=data_dict[field].dtype)
+
             # Write the corrected data
             for field in image_fields:
                 if field in self.compress_fields:
@@ -822,6 +847,11 @@ class AgipdCorrections:
                 else:
                     image_grp[field][:] = data_dict[field][:n_img]
 
+            # write data: lit-pixel counter
+            if self.corr_bools.get("count_lit_pixels"):
+                for field in litpx_output_fields:
+                    litpx_group[field][:] = data_dict[field][:n_img]
+
     def _write_compressed_frames(self, dataset, arr):
         """Compress gain/mask frames in multiple threads, and save their data
 
@@ -1495,9 +1525,32 @@ class AgipdCorrections:
             self.shared_dict[i]["valid_trains"] = sharedmem.empty(1024, dtype="u8")  # noqa
             self.shared_dict[i]["nimg_in_trains"] = sharedmem.empty(1024, dtype="i8")  # noqa
 
+            if self.corr_bools.get("count_lit_pixels"):
+                self.shared_dict[i]["unmaskedPixels"] = sharedmem.full(shape[0], 0, int)  # noqa
+                self.shared_dict[i]["litPixels"] = sharedmem.full(shape[0], 0, int)
+                self.shared_dict[i]["totalIntensity"] = sharedmem.full(shape[0], 0, np.float32)  # noqa
+
         if self.corr_bools.get("round_photons"):
-            self.shared_hist_preround = sharedmem.empty(len(self.hist_bins_preround) - 1, dtype="i8")
-            self.shared_hist_postround = sharedmem.empty(len(self.hist_bins_postround) - 1, dtype="i8")
+            self.shared_hist_preround = sharedmem.empty(len(self.hist_bins_preround) - 1, dtype="i8")  # noqa
+            self.shared_hist_postround = sharedmem.empty(len(self.hist_bins_postround) - 1, dtype="i8")  # noqa
+
+    def litpixel_counter(self, i_proc: int, first: int, last: int):
+        """Lit-pixel counter: counts pixels with a signal above a threshold.
+
+        :param i_proc: Index of shared memory array to process
+        :param first: Index of the first image to process
+        :param last: Index of the last image to process (exclusive)
+        """
+        data = self.shared_dict[i_proc]
+
+        for i in range(first, last):
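+            # Good (unmasked) pixels have a zero bad-pixel mask value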
+            mask = data["mask"][i] == 0
+            image = data["data"][i]
+
+            data["totalIntensity"][i] = np.sum(image, initial=0, where=mask)
+            data["litPixels"][i] = np.sum(image > self.litpx_threshold,
+                                          initial=0, where=mask)
+            data["unmaskedPixels"][i] = np.sum(mask)
 
 
 def validate_selected_pulses(
diff --git a/src/cal_tools/files.py b/src/cal_tools/files.py
index cb0d527c0ae0eaecc201910f4b64d0e4754388ff..9d9f4827f728436578bc98ead87926bdd742d22a 100644
--- a/src/cal_tools/files.py
+++ b/src/cal_tools/files.py
@@ -1,12 +1,12 @@
 
+import re
 from datetime import datetime, timezone
 from itertools import chain
 from numbers import Integral
 from pathlib import Path
-import re
 
-import numpy as np
 import h5py
+import numpy as np
 
 
 def get_pulse_offsets(pulses_per_train):
@@ -257,6 +257,39 @@ class DataFile(h5py.File):
         return InstrumentSource(self.create_group(f'INSTRUMENT/{source}').id,
                                 source)
 
+    def require_control_source(self, source):
+        """Return group for a control source, creating it if it doesn’t exist.
+
+        Control sources created via this method are not required to be
+        passed to create_metadata() again.
+
+        Args:
+            source (str): Karabo device ID.
+
+        Returns:
+            (ControlSource) Group in CONTROL.
+        """
+        group = self.get(f'CONTROL/{source}')
+        return (self.create_control_source(source) if group is None
+                else ControlSource(group.id, source))
+
+    def require_instrument_source(self, source):
+        """Return group for a instrument source, creating it if it doesn’t exist.
+
+        Instrument sources created via this method are not required to be
+        passed to create_metadata() again.
+
+        Args:
+            source (str): Karabo pipeline path, must satisfy
+                DataFile.instrument_source_pattern.
+
+        Returns:
+            (InstrumentSource) Group in INSTRUMENT.
+        """
+        group = self.get(f'INSTRUMENT/{source}')
+        return (self.create_instrument_source(source) if group is None
+                else InstrumentSource(group.id, source))
+
     def create_metadata(self, like=None, *,
                         creation_date=None, update_date=None, proposal=0,
                         run=0, sequence=None, daq_library='1.x',