Commit 8714bba8, authored 11 months ago by Jakobsen, Mads Bregenholt (onlineXPCS)
This file contains functionality to test the XPCS functions.
Parent: dde9ab6f
Changes: 1 changed file
testing/test.py (new file, mode 100644): 148 additions, 0 deletions

# this is a file that tests the different xpcs implementations
# i.e., fast XPCS and myxpcs (from mads)

import time
import numpy as np
#import cv2
import matplotlib.pyplot as pl

# get some data
dataShape = (352, 512, 128)
photonEnergy = 13.3  # in ADU

# this is a function that generates a dummy panel dataset given a shape
# input: -shape, the shape of the data generated,
#         shape = (numBunches, panelSlowScanDim, panelFastScanDim)
#         e.g. shape = (352, 512, 128)
#
#        -chanceOfPhotonPerPixel, the chance of measuring a photon per pixel,
#         e.g. 0.05 or 0.01; note that measuring 2 photons in a pixel then has chance
#         0.05**2 = 0.0025 and measuring 3 photons has chance 0.05**3
#
#        -correlationPerBunch, the correlation from one image to the next:
#         if this is equal to 1, then the same image is repeated numBunches times;
#         if this is equal to 0, then the images are completely uncorrelated, i.e., random
#
#        -energy, the energy of a photon in ADU, e.g., 15.
#         the peaks for 0, 1, 2, 3, ... photon hits sit at the
#         respective integer multiples of this value
#
#        -photonized, a boolean that decides whether the output is photonized (integer values).
#         if not, gaussian noise is added to the photonized values
#
#        -sigma, the standard deviation of the gaussian noise added. at around 0.15 the pixel
#         values are still relatively well localized within +/- 0.5 around the integer values;
#         if sigma > 0.15 the peaks start to smear into one another
def getDummyData(shape, chanceOfPhotonPerPixel=0.01, correlationPerBunch=0.5,
                 energy=15, photonized=True, sigma=0.15):
    data = np.zeros(shape, dtype=float)
    numPixelPrImage = shape[1]*shape[2]

    # draw an image where the chance of n photons in a pixel is proportional to q**n
    def getRandomImage(q=chanceOfPhotonPerPixel):
        probs = np.array([q**n for n in range(7)])
        probs /= probs.sum()
        return np.reshape(np.random.choice(np.arange(7), size=numPixelPrImage, p=probs),
                          (shape[1], shape[2]))

    # keep each pixel of the previous image with probability c, otherwise redraw it
    def evolveImageFrom(image, c=correlationPerBunch):
        randMask = np.reshape(np.random.choice([0, 1], size=numPixelPrImage, p=[1 - c, c]),
                              (shape[1], shape[2]))
        return np.where(randMask == 1, image, getRandomImage())

    data[0, :, :] = getRandomImage()
    for pulseNr in range(1, shape[0]):
        data[pulseNr, :, :] = evolveImageFrom(data[pulseNr - 1, :, :])
    # if we do not return photonized data, then add gaussian noise
    if not photonized:
        data += sigma*np.random.standard_normal(size=shape)
    data *= energy
    return data

data = getDummyData(dataShape, chanceOfPhotonPerPixel=0.01, correlationPerBunch=0.5,
                    photonized=False, energy=photonEnergy, sigma=0.12)
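
# quick illustrative sanity check of the dummy data generator (a sketch, not part of the
# xpcs test itself): for small chanceOfPhotonPerPixel the mean photon count per pixel is
# approximately chanceOfPhotonPerPixel, since higher photon numbers are suppressed like q**n
checkData = getDummyData((16, 64, 64), chanceOfPhotonPerPixel=0.01,
                         correlationPerBunch=0.5, photonized=True, energy=1)
print(f"mean photon count per pixel: {np.mean(checkData):.4f} (expected ~0.01)")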

# function to do the xpcs the same way as in the xpcs class
# in the xpcs calng addon
# input : -image, the data set, e.g., data
#         -energy, the photon energy in ADU
def xpcs(image, energy):
    # convert from ADU to photon counts and round to integer counts
    # (done out of place so the caller's array is left untouched)
    image = np.around(image / energy)
    flat_image_shape = image.reshape(image.shape[0], -1).shape
    print(flat_image_shape)
    reshaped_data = image.reshape(flat_image_shape).T
    print(reshaped_data.shape)
    indices = np.indices(reshaped_data.shape)
    # make a mask that picks out those pixels that are
    # lit, i.e., have a value larger than 0
    lit_mask = reshaped_data > 0
    # get the values of the lit pixels
    lit_pixels = reshaped_data[lit_mask].astype(np.uint32)
    # get the indices of the lit pixels
    lit_indices = indices[:, lit_mask].astype(np.uint32)
    # initialize a sparse array
    sparse_array = np.empty_like(lit_pixels, dtype=np.uint32)
    # call function to fill sparse array with indices and values via bitpacking
    sparsify2(lit_pixels, lit_indices, sparse_array)
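
# sparsify2 is expected to come from the xpcs calng addon and is not defined in this file.
# the stand-in below is only an illustrative sketch of one possible bit-packing layout so the
# script can run on its own; the layout (pixel index in bits 0-15, frame index in bits 16-24,
# photon count in bits 25-31) is an assumption, not the addon's actual format.
def sparsify2(lit_pixels, lit_indices, sparse_array):
    pixel_idx = lit_indices[0].astype(np.uint32)             # 0..65535 for a 512x128 panel
    frame_idx = lit_indices[1].astype(np.uint32)             # 0..351 for 352 bunches
    counts = np.clip(lit_pixels, 0, 127).astype(np.uint32)   # keep counts within 7 bits
    sparse_array[:] = pixel_idx | (frame_idx << 16) | (counts << 25)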

# time the xpcs call
t = time.time()
xpcs(data, photonEnergy)
elapsed = time.time() - t
print(f"elapsed {elapsed}")

# cpu code with python from xpcs
print(data.shape)
pl.hist(data.ravel(), bins=500, log=True)
pl.title('Histogram of values')
pl.xlabel("value")
pl.ylabel("Frequency")
pl.savefig("abc.png")
print('hist plotted')

# scale data linearly to the 0-255 range so it can be written as 8-bit video frames
def scaleForVideo(data):
    dataMin = np.min(data)
    dataMax = np.max(data)
    vid = np.round(255*(data - dataMin)/(dataMax - dataMin))
    print(np.min(vid), np.max(vid))
    return np.uint8(vid)

#videoData = scaleForVideo(data)
#
#fourcc = cv2.VideoWriter_fourcc(*'mp4v')
#out = cv2.VideoWriter('output.mp4', fourcc, 10.0, (dataShape[1], dataShape[2]))
#for bunchNr in range(dataShape[0]):
# print(bunchNr)
# out.write(cv2.cvtColor(np.transpose(videoData[bunchNr, :, :]),cv2.COLOR_GRAY2BGR))
#out.release()
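
# small illustrative check of scaleForVideo (separate from the cv2 writer above, which
# stays commented out): the scaled frames should come out as uint8 spanning 0..255
scaledCheck = scaleForVideo(data)
print(scaledCheck.dtype, scaledCheck.min(), scaledCheck.max())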