Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
C
calng
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
calibration
calng
Commits
b01015c4
Commit
b01015c4
authored
2 years ago
by
David Hammer
Browse files
Options
Downloads
Patches
Plain Diff
Allow thread pool usage
parent
e7f96bae
No related branches found
Branches containing commit
No related tags found
2 merge requests
!10
DetectorAssembler: assemble with extra shape (multiple frames)
,
!9
Stacking shmem matcher
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
src/calng/ShmemTrainMatcher.py
+82
-60
82 additions, 60 deletions
src/calng/ShmemTrainMatcher.py
with
82 additions
and
60 deletions
src/calng/ShmemTrainMatcher.py
+
82
−
60
View file @
b01015c4
import
concurrent.futures
import
enum
import
re
...
...
@@ -91,6 +92,12 @@ class ShmemTrainMatcher(TrainMatcher.TrainMatcher):
.
defaultValue
([])
.
reconfigurable
()
.
commit
(),
BOOL_ELEMENT
(
expected
)
.
key
(
"
useThreadPool
"
)
.
assignmentOptional
()
.
defaultValue
(
False
)
.
commit
(),
)
def
initialization
(
self
):
...
...
@@ -101,6 +108,10 @@ class ShmemTrainMatcher(TrainMatcher.TrainMatcher):
self
.
_prepare_merge_groups
(
self
.
get
(
"
merge
"
))
super
().
initialization
()
self
.
_shmem_handler
=
shmem_utils
.
ShmemCircularBufferReceiver
()
if
self
.
get
(
"
useThreadPool
"
):
self
.
_thread_pool
=
concurrent
.
futures
.
ThreadPoolExecutor
()
else
:
self
.
_thread_pool
=
None
def
preReconfigure
(
self
,
conf
):
super
().
preReconfigure
(
conf
)
...
...
@@ -165,70 +176,81 @@ class ShmemTrainMatcher(TrainMatcher.TrainMatcher):
(
key_re
,
new_key
)
)
def
on_matched_data
(
self
,
train_id
,
sources
):
def
_handle_source
(
self
,
source
,
data_hash
,
timestamp
,
new_
sources
_map
):
# dereference calng shmem handles
for
(
data
,
_
)
in
sources
.
values
():
self
.
_shmem_handler
.
dereference_shmem_handles
(
data
)
self
.
_shmem_handler
.
dereference_shmem_handles
(
data_hash
)
new_sources_map
=
{}
for
source
,
(
data
,
timestamp
)
in
sources
.
items
():
# stack across sources (many sources, same key)
# could probably save ~100 ns by "if ... in" instead of get
for
(
stack_key
,
new_source
)
in
self
.
_source_stacking_sources
.
get
(
source
,
()
):
this_data
=
data
.
get
(
stack_key
)
try
:
this_buffer
=
self
.
_stacking_buffers
[(
new_source
,
stack_key
)]
stack_index
=
self
.
_source_stacking_indices
[(
source
,
stack_key
)]
this_buffer
[
stack_index
]
=
this_data
except
(
ValueError
,
KeyError
):
# ValueError: wrong shape
# KeyError: buffer doesn't exist yet
# either way, create appropriate buffer now
# TODO: complain if shape varies between sources
self
.
_stacking_buffers
[(
new_source
,
stack_key
)]
=
np
.
empty
(
shape
=
(
max
(
index_
for
(
source_
,
key_
,
),
index_
in
self
.
_source_stacking_indices
.
items
()
if
source_
==
source
and
key_
==
stack_key
)
+
1
,
# stack across sources (many sources, same key)
# could probably save ~100 ns by "if ... in" instead of get
for
(
stack_key
,
new_source
)
in
self
.
_source_stacking_sources
.
get
(
source
,
()):
this_data
=
data_hash
.
get
(
stack_key
)
try
:
this_buffer
=
self
.
_stacking_buffers
[(
new_source
,
stack_key
)]
stack_index
=
self
.
_source_stacking_indices
[(
source
,
stack_key
)]
this_buffer
[
stack_index
]
=
this_data
except
(
ValueError
,
IndexError
,
KeyError
):
# ValueError: wrong shape
# KeyError: buffer doesn't exist yet
# either way, create appropriate buffer now
# TODO: complain if shape varies between sources
self
.
_stacking_buffers
[(
new_source
,
stack_key
)]
=
np
.
empty
(
shape
=
(
max
(
index_
for
(
source_
,
key_
,
),
index_
in
self
.
_source_stacking_indices
.
items
()
if
source_
==
source
and
key_
==
stack_key
)
+
this_data
.
shape
,
dtype
=
this_data
.
dtype
,
+
1
,
)
# and then try again
this_buffer
=
self
.
_stacking_buffers
[(
new_source
,
stack_key
)]
stack_index
=
self
.
_source_stacking_indices
[(
source
,
stack_key
)]
this_buffer
[
stack_index
]
=
this_data
# TODO: zero out unfilled buffer entries
data
.
erase
(
stack_key
)
if
new_source
not
in
new_sources_map
:
new_sources_map
[
new_source
]
=
(
Hash
(),
timestamp
)
new_source_hash
=
new_sources_map
[
new_source
][
0
]
if
not
new_source_hash
.
has
(
stack_key
):
new_source_hash
[
stack_key
]
=
this_buffer
# stack keys (multiple keys within this source)
for
(
key_re
,
new_key
)
in
self
.
_key_stacking_sources
.
get
(
source
,
()):
# note: please no overlap between different key_re
# note: if later key_re match earlier new_key, this gets spicy
stack_keys
=
[
key
for
key
in
data
.
paths
()
if
key_re
.
match
(
key
)]
try
:
# TODO: consider reusing buffers here, too
stacked
=
np
.
stack
([
data
.
get
(
key
)
for
key
in
stack_keys
],
axis
=
0
)
except
Exception
as
e
:
self
.
log
.
WARN
(
f
"
Failed to stack
{
key_re
}
for
{
source
}
:
{
e
}
"
)
else
:
for
key
in
stack_keys
:
data
.
erase
(
key
)
data
[
new_key
]
=
stacked
+
this_data
.
shape
,
dtype
=
this_data
.
dtype
,
)
# and then try again
this_buffer
=
self
.
_stacking_buffers
[(
new_source
,
stack_key
)]
stack_index
=
self
.
_source_stacking_indices
[(
source
,
stack_key
)]
this_buffer
[
stack_index
]
=
this_data
# TODO: zero out unfilled buffer entries
data_hash
.
erase
(
stack_key
)
if
new_source
not
in
new_sources_map
:
new_sources_map
[
new_source
]
=
(
Hash
(),
timestamp
)
new_source_hash
=
new_sources_map
[
new_source
][
0
]
if
not
new_source_hash
.
has
(
stack_key
):
new_source_hash
[
stack_key
]
=
this_buffer
# stack keys (multiple keys within this source)
for
(
key_re
,
new_key
)
in
self
.
_key_stacking_sources
.
get
(
source
,
()):
# note: please no overlap between different key_re
# note: if later key_re match earlier new_key, this gets spicy
stack_keys
=
[
key
for
key
in
data_hash
.
paths
()
if
key_re
.
match
(
key
)]
try
:
# TODO: consider reusing buffers here, too
stacked
=
np
.
stack
([
data_hash
.
get
(
key
)
for
key
in
stack_keys
],
axis
=
0
)
except
Exception
as
e
:
self
.
log
.
WARN
(
f
"
Failed to stack
{
key_re
}
for
{
source
}
:
{
e
}
"
)
else
:
for
key
in
stack_keys
:
data_hash
.
erase
(
key
)
data_hash
[
new_key
]
=
stacked
def on_matched_data(self, train_id, sources):
    """Dereference shmem handles and apply stacking for all matched sources.

    Runs ``_handle_source`` for each entry of ``sources``, either
    sequentially or on the thread pool (when configured via the
    ``useThreadPool`` property), then merges any newly created stacked
    sources into ``sources`` before the match result is sent on.
    """
    new_sources_map = {}
    if self._thread_pool is None:
        for source, (data, timestamp) in sources.items():
            self._handle_source(source, data, timestamp, new_sources_map)
    else:
        futures = [
            # fix: submit() previously dropped the `source` argument, so
            # every pooled call raised TypeError inside its future — and
            # the results were never inspected, hiding the failure
            self._thread_pool.submit(
                self._handle_source, source, data, timestamp, new_sources_map
            )
            for source, (data, timestamp) in sources.items()
        ]
        concurrent.futures.wait(futures)
        for future in futures:
            # surface worker failures instead of silently dropping them
            exc = future.exception()
            if exc is not None:
                self.log.WARN(f"Error handling matched source: {exc}")
    sources.update(new_sources_map)
...
...
This diff is collapsed.
Click to expand it.
David Hammer
@hammerd
mentioned in commit
9495dda8
·
2 years ago
mentioned in commit
9495dda8
mentioned in commit 9495dda895756fcd9a2ebb5f8adbaa050e067f33
Toggle commit list
David Hammer
@hammerd
mentioned in commit
99de26dd
·
2 years ago
mentioned in commit
99de26dd
mentioned in commit 99de26dda08d02c1c2b5efc1f1f462cfc4f0ce63
Toggle commit list
David Hammer
@hammerd
mentioned in commit
3566342d
·
2 years ago
mentioned in commit
3566342d
mentioned in commit 3566342d031db1bb435b6430f100afdbaaa6c4bf
Toggle commit list
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment