Commit 95c5e959 authored Aug 22, 2016 by Davis King
Added annotation() to tensor so that you can associate any object you want
with a tensor.
parent 3a9b1242
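A minimal usage sketch of the new API (an illustration of the commit message, not code taken from the diff), assuming dlib's usual any accessors (contains, cast_to): annotation() hands back a dlib::any, so any copyable object can be attached to a tensor and read back later.

    #include <dlib/dnn.h>
    #include <iostream>
    #include <string>

    int main()
    {
        dlib::resizable_tensor t(1, 3, 4, 4);

        // annotation() returns a dlib::any, so any copyable object can be stored.
        t.annotation() = std::string("input batch #7");

        // Read it back with the usual dlib::any accessors.
        if (t.annotation().contains<std::string>())
            std::cout << t.annotation().cast_to<std::string>() << std::endl;
    }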
Showing 2 changed files with 40 additions and 3 deletions:

    dlib/dnn/tensor.h            +17  -3
    dlib/dnn/tensor_abstract.h   +23  -0
dlib/dnn/tensor.h
@@ -10,6 +10,7 @@
 #include "gpu_data.h"
 #include "../byte_orderer.h"
 #include <memory>
+#include "../any.h"
 
 namespace dlib
 {

@@ -52,6 +53,9 @@ namespace dlib
         virtual float* device() = 0;
         virtual float* device_write_only() = 0;
 
+        virtual const any& annotation() const = 0;
+        virtual any& annotation() = 0;
+
         int device_id() const { return data().device_id(); }
 
         tensor& operator= (float val)

@@ -283,13 +287,13 @@ namespace dlib
             set_size(n_,k_,nr_,nc_);
         }
 
-        resizable_tensor(const resizable_tensor& item)
+        resizable_tensor(const resizable_tensor& item) : _annotation(item.annotation())
         {
             // TODO, do the copy with cuda?
             copy_size(item);
             std::memcpy(data_instance.host(), item.host(), data_instance.size()*sizeof(float));
         }
 
-        resizable_tensor(const tensor& item)
+        resizable_tensor(const tensor& item) : _annotation(item.annotation())
         {
             // TODO, do the copy with cuda?
             copy_size(item);

@@ -306,10 +310,14 @@ namespace dlib
         virtual float* device() { return data_instance.device(); }
         virtual float* device_write_only() { return data_instance.device_write_only(); }
 
+        virtual const any& annotation() const { return _annotation; }
+        virtual any& annotation() { return _annotation; }
+
         void clear(
         )
         {
             set_size(0,0,0,0);
+            _annotation.clear();
         }
 
         void copy_size (

@@ -377,6 +385,7 @@ namespace dlib
             std::swap(m_nc, item.m_nc);
             std::swap(m_size, item.m_size);
             std::swap(data_instance, item.data_instance);
+            std::swap(_annotation, item._annotation);
 #ifdef DLIB_USE_CUDA
             std::swap(cudnn_descriptor, item.cudnn_descriptor);
 #endif

@@ -394,6 +403,7 @@ namespace dlib
 #endif
         gpu_data data_instance;
+        any _annotation;
 
         virtual gpu_data& data() { return data_instance; }
         virtual const gpu_data& data() const { return data_instance; }
     };

@@ -470,7 +480,7 @@ namespace dlib
     class alias_tensor_instance : public tensor
     {
         alias_tensor_instance(
-        ) : data_instance(0), data_offset(0) {}
+        ) : data_instance(0), _annotation(0), data_offset(0) {}
 
     public:
         friend class alias_tensor;

@@ -495,6 +505,8 @@ namespace dlib
         virtual float* device() { return data_instance->device()+data_offset; }
         virtual float* device_write_only() { return data_instance->device()+data_offset; }
+        virtual const any& annotation() const { return *_annotation; }
+        virtual any& annotation() { return *_annotation; }
 
 #ifdef DLIB_USE_CUDA
         virtual const cuda::tensor_descriptor& get_cudnn_tensor_descriptor(

@@ -508,6 +520,7 @@ namespace dlib
         std::shared_ptr<cuda::tensor_descriptor> cudnn_descriptor;
 #endif
         gpu_data* data_instance;
+        any* _annotation;
         size_t data_offset;
         virtual gpu_data& data() { return *data_instance; }
         virtual const gpu_data& data() const { return *data_instance; }

@@ -563,6 +576,7 @@ namespace dlib
         }
 #endif
         inst.data_instance = &t.data();
+        inst._annotation = &t.annotation();
         // Note that t might already be an aliasing tensor so we need to take that into
         // account.
         inst.data_offset = t.get_alias_offset()+offset;
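To make the semantics of the tensor.h changes concrete, here is a small sketch (my example, not part of the diff), assuming the behavior shown above: the resizable_tensor copy constructors now initialize _annotation from the source tensor, and clear() also clears it.

    #include <dlib/dnn.h>
    #include <cassert>

    int main()
    {
        dlib::resizable_tensor a(2, 3);
        a.annotation() = 42;                           // store an int annotation

        dlib::resizable_tensor b(a);                   // copy ctor: _annotation(item.annotation())
        assert(b.annotation().cast_to<int>() == 42);   // the annotation travels with the copy

        a.clear();                                     // clear() now also calls _annotation.clear()
        assert(a.annotation().is_empty());
    }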
dlib/dnn/tensor_abstract.h
@@ -4,6 +4,7 @@
 #ifdef DLIB_DNn_TENSOR_ABSTRACT_H_
 
 #include "../matrix.h"
+#include "../any/any_abstract.h"
 
 namespace dlib
 {

@@ -187,6 +188,26 @@ namespace dlib
                   every memory location in the returned memory block.
         !*/
 
+        virtual const any& annotation(
+        ) const = 0;
+        /*!
+            ensures
+                - returns a const reference to the any object in this tensor.  The any
+                  object can be used to store any additional annotation you like in a
+                  tensor.  However, it should be noted that the annotation() is ignored by
+                  serialize() and therefore not saved when a tensor is serialized.
+        !*/
+
+        virtual any& annotation(
+        ) = 0;
+        /*!
+            ensures
+                - returns a non-const reference to the any object in this tensor.  The any
+                  object can be used to store any additional annotation you like in a
+                  tensor.  However, it should be noted that the annotation() is ignored by
+                  serialize() and therefore not saved when a tensor is serialized.
+        !*/
+
         int device_id(
         ) const;
         /*!

@@ -461,6 +482,7 @@ namespace dlib
                 - #k() == 0
                 - #nr() == 0
                 - #nc() == 0
+                - #annotation().is_empty() == true
         !*/
 
         void copy_size (

@@ -617,6 +639,7 @@ namespace dlib
                 - T.nc() == nc()
                 - T.host() == t.host()+offset
                 - T.device() == t.device()+offset
+                - &T.annotation() == &t.annotation()
         !*/
     };
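The new contract line for alias tensors (&T.annotation() == &t.annotation()) means an alias view shares the annotation object of the tensor it aliases. A rough, hedged sketch of how that could be exercised (my illustration; the alias_tensor dimensions and offset are arbitrary, and the alias_tensor construction follows dlib's documented usage rather than anything in this commit):

    #include <dlib/dnn.h>
    #include <string>
    #include <cassert>

    int main()
    {
        dlib::resizable_tensor t(4, 2, 8, 8);

        // View the first two samples of t through an alias_tensor.
        dlib::alias_tensor view(2, 2, 8, 8);
        auto v = view(t, 0);                     // alias_tensor_instance over t's memory

        v.annotation() = std::string("shared note");

        // Per tensor_abstract.h: the alias exposes the same any object as t,
        // and serialize() would not save it.
        assert(&v.annotation() == &t.annotation());
        assert(t.annotation().contains<std::string>());
    }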