Joel Joseph / food-round2, commit bc4f7533 (unverified)
Authored 6 years ago by Kai Chen; committed 6 years ago by GitHub
Merge pull request #499 from yhcao6/pytorch-nightly

fix dcn to support pytorch-nightly

Parents: a054aef4, 753be5b5
Showing 1 changed file with 9 additions and 9 deletions:

mmdet/ops/dcn/src/deform_conv_cuda.cpp (+9, −9)
@@ -195,11 +195,11 @@ int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight,
                         outputHeight, outputWidth});

   columns = at::zeros(
       {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
-      input.type());
+      input.options());

   if (ones.ndimension() != 2 ||
       ones.size(0) * ones.size(1) < outputHeight * outputWidth) {
-    ones = at::ones({outputHeight, outputWidth}, input.type());
+    ones = at::ones({outputHeight, outputWidth}, input.options());
   }

   input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
@@ -211,7 +211,7 @@ int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight,
   at::Tensor output_buffer =
       at::zeros({batchSize / im2col_step, nOutputPlane,
                  im2col_step * outputHeight, outputWidth},
-                output.type());
+                output.options());

   output_buffer = output_buffer.view(
       {output_buffer.size(0), group, output_buffer.size(1) / group,
@@ -299,7 +299,7 @@ int deform_conv_backward_input_cuda(at::Tensor input, at::Tensor offset,
   gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth});
   columns = at::zeros(
       {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
-      input.type());
+      input.options());

   // change order of grad output
   gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
@@ -414,7 +414,7 @@ int deform_conv_backward_parameters_cuda(
   columns = at::zeros(
       {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
-      input.type());
+      input.options());

   gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
                                 nOutputPlane, outputHeight, outputWidth});
@@ -518,7 +518,7 @@ void modulated_deform_conv_cuda_forward(
   if (ones.ndimension() != 2 ||
       ones.size(0) * ones.size(1) < height_out * width_out) {
     // Resize plane and fill with ones...
-    ones = at::ones({height_out, width_out}, input.type());
+    ones = at::ones({height_out, width_out}, input.options());
   }

   // resize output
@@ -526,7 +526,7 @@ void modulated_deform_conv_cuda_forward(
   // resize temporary columns
   columns =
       at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out},
-                input.type());
+                input.options());

   output = output.view({output.size(0), group, output.size(1) / group,
                         output.size(2), output.size(3)});
@@ -597,12 +597,12 @@ void modulated_deform_conv_cuda_backward(
   if (ones.ndimension() != 2 ||
       ones.size(0) * ones.size(1) < height_out * width_out) {
     // Resize plane and fill with ones...
-    ones = at::ones({height_out, width_out}, input.type());
+    ones = at::ones({height_out, width_out}, input.options());
   }

   grad_input = grad_input.view({batch, channels, height, width});
   columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out},
-                      input.type());
+                      input.options());

   grad_output =
       grad_output.view({grad_output.size(0), group, grad_output.size(1) / group,
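For context, every hunk above makes the same substitution: tensor factory calls that previously passed input.type() or output.type() now pass .options(), the TensorOptions-based form that newer ATen (pytorch-nightly at the time) expects for at::zeros and at::ones. Below is a minimal sketch of the pattern, assuming a libtorch/ATen build environment; the shapes and the main() wrapper are illustrative only and not taken from the file.

// Sketch only: shows the factory-call style this commit migrates to.
#include <ATen/ATen.h>

int main() {
  at::Tensor input = at::rand({2, 3, 8, 8});

  // Old style used in this file:  at::zeros(sizes, input.type());
  // New style: pass the tensor's TensorOptions, which bundles dtype,
  // device, and layout in one object.
  at::Tensor columns = at::zeros({27, 64}, input.options());
  at::Tensor ones = at::ones({8, 8}, input.options());

  return 0;
}

Constructing from .options() keeps the new tensors on the same device and dtype as the source tensor, which is why the change can be applied uniformly across the forward and backward paths.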