Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in / Register
Toggle navigation
O
opencv
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Packages
Packages
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
submodule
opencv
Commits
869a35fa
Commit
869a35fa
authored
Jun 07, 2013
by
Vladislav Vinogradov
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
fixed constructors for functional objects (added __host__ modifier)
parent
bf7c1c6c
Hide whitespace changes
Inline
Side-by-side
Showing
16 changed files
with
72 additions
and
72 deletions
+72
-72
absdiff_mat.cu
modules/gpuarithm/src/cuda/absdiff_mat.cu
+6
-6
absdiff_scalar.cu
modules/gpuarithm/src/cuda/absdiff_scalar.cu
+1
-1
add_mat.cu
modules/gpuarithm/src/cuda/add_mat.cu
+6
-6
add_scalar.cu
modules/gpuarithm/src/cuda/add_scalar.cu
+1
-1
add_weighted.cu
modules/gpuarithm/src/cuda/add_weighted.cu
+2
-2
cmp_mat.cu
modules/gpuarithm/src/cuda/cmp_mat.cu
+8
-8
div_inv.cu
modules/gpuarithm/src/cuda/div_inv.cu
+1
-1
div_mat.cu
modules/gpuarithm/src/cuda/div_mat.cu
+7
-7
div_scalar.cu
modules/gpuarithm/src/cuda/div_scalar.cu
+1
-1
math.cu
modules/gpuarithm/src/cuda/math.cu
+8
-8
minmax_mat.cu
modules/gpuarithm/src/cuda/minmax_mat.cu
+8
-8
mul_mat.cu
modules/gpuarithm/src/cuda/mul_mat.cu
+7
-7
mul_scalar.cu
modules/gpuarithm/src/cuda/mul_scalar.cu
+1
-1
reduce.cu
modules/gpuarithm/src/cuda/reduce.cu
+8
-8
sub_mat.cu
modules/gpuarithm/src/cuda/sub_mat.cu
+6
-6
sub_scalar.cu
modules/gpuarithm/src/cuda/sub_scalar.cu
+1
-1
No files found.
modules/gpuarithm/src/cuda/absdiff_mat.cu
View file @
869a35fa
...
...
@@ -62,8 +62,8 @@ namespace arithm
return vabsdiff4(a, b);
}
__device__ __forceinline__ VAbsDiff4() {}
__device__ __forceinline__ VAbsDiff4(const VAbsDiff4& other) {}
__host__ __device__ __forceinline__ VAbsDiff4() {}
__host__ __device__ __forceinline__ VAbsDiff4(const VAbsDiff4&) {}
};
struct VAbsDiff2 : binary_function<uint, uint, uint>
...
...
@@ -73,8 +73,8 @@ namespace arithm
return vabsdiff2(a, b);
}
__device__ __forceinline__ VAbsDiff2() {}
__device__ __forceinline__ VAbsDiff2(const VAbsDiff2& other) {}
__host__ __device__ __forceinline__ VAbsDiff2() {}
__host__ __device__ __forceinline__ VAbsDiff2(const VAbsDiff2&) {}
};
__device__ __forceinline__ int _abs(int a)
...
...
@@ -97,8 +97,8 @@ namespace arithm
return saturate_cast<T>(_abs(a - b));
}
__device__ __forceinline__ AbsDiffMat() {}
__device__ __forceinline__ AbsDiffMat(const AbsDiffMat& other) {}
__host__ __device__ __forceinline__ AbsDiffMat() {}
__host__ __device__ __forceinline__ AbsDiffMat(const AbsDiffMat&) {}
};
}
...
...
modules/gpuarithm/src/cuda/absdiff_scalar.cu
View file @
869a35fa
...
...
@@ -59,7 +59,7 @@ namespace arithm
{
S val;
explicit AbsDiffScalar(S val_) : val(val_) {}
__host__ explicit AbsDiffScalar(S val_) : val(val_) {}
__device__ __forceinline__ T operator ()(T a) const
{
...
...
modules/gpuarithm/src/cuda/add_mat.cu
View file @
869a35fa
...
...
@@ -62,8 +62,8 @@ namespace arithm
return vadd4(a, b);
}
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4& other) {}
__host__ __device__ __forceinline__ VAdd4() {}
__host__ __device__ __forceinline__ VAdd4(const VAdd4&) {}
};
struct VAdd2 : binary_function<uint, uint, uint>
...
...
@@ -73,8 +73,8 @@ namespace arithm
return vadd2(a, b);
}
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2& other) {}
__host__ __device__ __forceinline__ VAdd2() {}
__host__ __device__ __forceinline__ VAdd2(const VAdd2&) {}
};
template <typename T, typename D> struct AddMat : binary_function<T, T, D>
...
...
@@ -84,8 +84,8 @@ namespace arithm
return saturate_cast<D>(a + b);
}
__device__ __forceinline__ AddMat() {}
__device__ __forceinline__ AddMat(const AddMat& other) {}
__host__ __device__ __forceinline__ AddMat() {}
__host__ __device__ __forceinline__ AddMat(const AddMat&) {}
};
}
...
...
modules/gpuarithm/src/cuda/add_scalar.cu
View file @
869a35fa
...
...
@@ -59,7 +59,7 @@ namespace arithm
{
S val;
explicit AddScalar(S val_) : val(val_) {}
__host__ explicit AddScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
...
...
modules/gpuarithm/src/cuda/add_weighted.cu
View file @
869a35fa
...
...
@@ -74,7 +74,7 @@ namespace arithm
float beta;
float gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
__host__ AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
...
...
@@ -87,7 +87,7 @@ namespace arithm
double beta;
double gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
__host__ AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
...
...
modules/gpuarithm/src/cuda/cmp_mat.cu
View file @
869a35fa
...
...
@@ -62,8 +62,8 @@ namespace arithm
return vcmpeq4(a, b);
}
__device__ __forceinline__ VCmpEq4() {}
__device__ __forceinline__ VCmpEq4(const VCmpEq4& other) {}
__host__ __device__ __forceinline__ VCmpEq4() {}
__host__ __device__ __forceinline__ VCmpEq4(const VCmpEq4&) {}
};
struct VCmpNe4 : binary_function<uint, uint, uint>
{
...
...
@@ -72,8 +72,8 @@ namespace arithm
return vcmpne4(a, b);
}
__device__ __forceinline__ VCmpNe4() {}
__device__ __forceinline__ VCmpNe4(const VCmpNe4& other) {}
__host__ __device__ __forceinline__ VCmpNe4() {}
__host__ __device__ __forceinline__ VCmpNe4(const VCmpNe4&) {}
};
struct VCmpLt4 : binary_function<uint, uint, uint>
{
...
...
@@ -82,8 +82,8 @@ namespace arithm
return vcmplt4(a, b);
}
__device__ __forceinline__ VCmpLt4() {}
__device__ __forceinline__ VCmpLt4(const VCmpLt4& other) {}
__host__ __device__ __forceinline__ VCmpLt4() {}
__host__ __device__ __forceinline__ VCmpLt4(const VCmpLt4&) {}
};
struct VCmpLe4 : binary_function<uint, uint, uint>
{
...
...
@@ -92,8 +92,8 @@ namespace arithm
return vcmple4(a, b);
}
__device__ __forceinline__ VCmpLe4() {}
__device__ __forceinline__ VCmpLe4(const VCmpLe4& other) {}
__host__ __device__ __forceinline__ VCmpLe4() {}
__host__ __device__ __forceinline__ VCmpLe4(const VCmpLe4&) {}
};
template <class Op, typename T>
...
...
modules/gpuarithm/src/cuda/div_inv.cu
View file @
869a35fa
...
...
@@ -59,7 +59,7 @@ namespace arithm
{
S val;
explicit DivInv(S val_) : val(val_) {}
__host__ explicit DivInv(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
...
...
modules/gpuarithm/src/cuda/div_mat.cu
View file @
869a35fa
...
...
@@ -91,8 +91,8 @@ namespace arithm
return b != 0 ? saturate_cast<D>(a / b) : 0;
}
__device__ __forceinline__ Div() {}
__device__ __forceinline__ Div(const Div& other) {}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T> struct Div<T, float> : binary_function<T, T, float>
{
...
...
@@ -101,8 +101,8 @@ namespace arithm
return b != 0 ? static_cast<float>(a) / b : 0;
}
__device__ __forceinline__ Div() {}
__device__ __forceinline__ Div(const Div& other) {}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T> struct Div<T, double> : binary_function<T, T, double>
{
...
...
@@ -111,15 +111,15 @@ namespace arithm
return b != 0 ? static_cast<double>(a) / b : 0;
}
__device__ __forceinline__ Div() {}
__device__ __forceinline__ Div(const Div& other) {}
__host__ __device__ __forceinline__ Div() {}
__host__ __device__ __forceinline__ Div(const Div&) {}
};
template <typename T, typename S, typename D> struct DivScale : binary_function<T, T, D>
{
S scale;
explicit DivScale(S scale_) : scale(scale_) {}
__host__ explicit DivScale(S scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a, T b) const
{
...
...
modules/gpuarithm/src/cuda/div_scalar.cu
View file @
869a35fa
...
...
@@ -59,7 +59,7 @@ namespace arithm
{
S val;
explicit DivScalar(S val_) : val(val_) {}
__host__ explicit DivScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
...
...
modules/gpuarithm/src/cuda/math.cu
View file @
869a35fa
...
...
@@ -94,8 +94,8 @@ namespace arithm
return saturate_cast<T>(x * x);
}
__device__ __forceinline__ Sqr() {}
__device__ __forceinline__ Sqr(const Sqr& other) {}
__host__ __device__ __forceinline__ Sqr() {}
__host__ __device__ __forceinline__ Sqr(const Sqr&) {}
};
}
...
...
@@ -190,8 +190,8 @@ namespace arithm
return saturate_cast<T>(f(x));
}
__device__ __forceinline__ Exp() {}
__device__ __forceinline__ Exp(const Exp& other) {}
__host__ __device__ __forceinline__ Exp() {}
__host__ __device__ __forceinline__ Exp(const Exp&) {}
};
}
...
...
@@ -228,7 +228,7 @@ namespace arithm
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__host__ explicit PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
...
...
@@ -239,7 +239,7 @@ namespace arithm
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__host__ explicit PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
...
...
@@ -255,7 +255,7 @@ namespace arithm
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__host__ explicit PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ float operator()(float e) const
{
...
...
@@ -266,7 +266,7 @@ namespace arithm
{
double power;
PowOp(double power_) : power(power_) {}
__host__ explicit PowOp(double power_) : power(power_) {}
__device__ __forceinline__ double operator()(double e) const
{
...
...
modules/gpuarithm/src/cuda/minmax_mat.cu
View file @
869a35fa
...
...
@@ -65,8 +65,8 @@ namespace arithm
return vmin4(a, b);
}
__device__ __forceinline__ VMin4() {}
__device__ __forceinline__ VMin4(const VMin4& other) {}
__host__ __device__ __forceinline__ VMin4() {}
__host__ __device__ __forceinline__ VMin4(const VMin4&) {}
};
struct VMin2 : binary_function<uint, uint, uint>
...
...
@@ -76,8 +76,8 @@ namespace arithm
return vmin2(a, b);
}
__device__ __forceinline__ VMin2() {}
__device__ __forceinline__ VMin2(const VMin2& other) {}
__host__ __device__ __forceinline__ VMin2() {}
__host__ __device__ __forceinline__ VMin2(const VMin2&) {}
};
}
...
...
@@ -151,8 +151,8 @@ namespace arithm
return vmax4(a, b);
}
__device__ __forceinline__ VMax4() {}
__device__ __forceinline__ VMax4(const VMax4& other) {}
__host__ __device__ __forceinline__ VMax4() {}
__host__ __device__ __forceinline__ VMax4(const VMax4&) {}
};
struct VMax2 : binary_function<uint, uint, uint>
...
...
@@ -162,8 +162,8 @@ namespace arithm
return vmax2(a, b);
}
__device__ __forceinline__ VMax2() {}
__device__ __forceinline__ VMax2(const VMax2& other) {}
__host__ __device__ __forceinline__ VMax2() {}
__host__ __device__ __forceinline__ VMax2(const VMax2&) {}
};
}
...
...
modules/gpuarithm/src/cuda/mul_mat.cu
View file @
869a35fa
...
...
@@ -69,8 +69,8 @@ namespace arithm
return res;
}
__device__ __forceinline__ Mul_8uc4_32f() {}
__device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f& other) {}
__host__ __device__ __forceinline__ Mul_8uc4_32f() {}
__host__ __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f&) {}
};
struct Mul_16sc4_32f : binary_function<short4, float, short4>
...
...
@@ -81,8 +81,8 @@ namespace arithm
saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
}
__device__ __forceinline__ Mul_16sc4_32f() {}
__device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f& other) {}
__host__ __device__ __forceinline__ Mul_16sc4_32f() {}
__host__ __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f&) {}
};
template <typename T, typename D> struct Mul : binary_function<T, T, D>
...
...
@@ -92,15 +92,15 @@ namespace arithm
return saturate_cast<D>(a * b);
}
__device__ __forceinline__ Mul() {}
__device__ __forceinline__ Mul(const Mul& other) {}
__host__ __device__ __forceinline__ Mul() {}
__host__ __device__ __forceinline__ Mul(const Mul&) {}
};
template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
{
S scale;
explicit MulScale(S scale_) : scale(scale_) {}
__host__ explicit MulScale(S scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a, T b) const
{
...
...
modules/gpuarithm/src/cuda/mul_scalar.cu
View file @
869a35fa
...
...
@@ -59,7 +59,7 @@ namespace arithm
{
S val;
explicit MulScalar(S val_) : val(val_) {}
__host__ explicit MulScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
...
...
modules/gpuarithm/src/cuda/reduce.cu
View file @
869a35fa
...
...
@@ -76,8 +76,8 @@ namespace reduce
return r;
}
__device__ __forceinline__ Sum() {}
__device__ __forceinline__ Sum(const Sum&) {}
__host__ __device__ __forceinline__ Sum() {}
__host__ __device__ __forceinline__ Sum(const Sum&) {}
};
struct Avg
...
...
@@ -100,8 +100,8 @@ namespace reduce
return r / sz;
}
__device__ __forceinline__ Avg() {}
__device__ __forceinline__ Avg(const Avg&) {}
__host__ __device__ __forceinline__ Avg() {}
__host__ __device__ __forceinline__ Avg(const Avg&) {}
};
struct Min
...
...
@@ -125,8 +125,8 @@ namespace reduce
return r;
}
__device__ __forceinline__ Min() {}
__device__ __forceinline__ Min(const Min&) {}
__host__ __device__ __forceinline__ Min() {}
__host__ __device__ __forceinline__ Min(const Min&) {}
};
struct Max
...
...
@@ -150,8 +150,8 @@ namespace reduce
return r;
}
__device__ __forceinline__ Max() {}
__device__ __forceinline__ Max(const Max&) {}
__host__ __device__ __forceinline__ Max() {}
__host__ __device__ __forceinline__ Max(const Max&) {}
};
///////////////////////////////////////////////////////////
...
...
modules/gpuarithm/src/cuda/sub_mat.cu
View file @
869a35fa
...
...
@@ -62,8 +62,8 @@ namespace arithm
return vsub4(a, b);
}
__device__ __forceinline__ VSub4() {}
__device__ __forceinline__ VSub4(const VSub4& other) {}
__host__ __device__ __forceinline__ VSub4() {}
__host__ __device__ __forceinline__ VSub4(const VSub4&) {}
};
struct VSub2 : binary_function<uint, uint, uint>
...
...
@@ -73,8 +73,8 @@ namespace arithm
return vsub2(a, b);
}
__device__ __forceinline__ VSub2() {}
__device__ __forceinline__ VSub2(const VSub2& other) {}
__host__ __device__ __forceinline__ VSub2() {}
__host__ __device__ __forceinline__ VSub2(const VSub2&) {}
};
template <typename T, typename D> struct SubMat : binary_function<T, T, D>
...
...
@@ -84,8 +84,8 @@ namespace arithm
return saturate_cast<D>(a - b);
}
__device__ __forceinline__ SubMat() {}
__device__ __forceinline__ SubMat(const SubMat& other) {}
__host__ __device__ __forceinline__ SubMat() {}
__host__ __device__ __forceinline__ SubMat(const SubMat&) {}
};
}
...
...
modules/gpuarithm/src/cuda/sub_scalar.cu
View file @
869a35fa
...
...
@@ -59,7 +59,7 @@ namespace arithm
{
S val;
explicit SubScalar(S val_) : val(val_) {}
__host__ explicit SubScalar(S val_) : val(val_) {}
__device__ __forceinline__ D operator ()(T a) const
{
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment