Oleg Dzhimiev / tfhello / Commits / b234dbef
Commit b234dbef authored Mar 16, 2020 by Oleg Dzhimiev
latest tests
parent f76d82f2
Showing 1 changed file with 59 additions and 6 deletions
src/main/java/tfhello.java  +59 -6
...
@@ -8,6 +8,7 @@ import org.tensorflow.Operation;
import org.tensorflow.framework.ConfigProto;
import org.tensorflow.framework.GPUOptions;
import org.tensorflow.framework.CallableOptions;
import static jcuda.driver.JCudaDriver.cuCtxCreate;
import static jcuda.driver.JCudaDriver.cuCtxSynchronize;
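The config object passed to new Session(g, config.toByteArray()) further down is not constructed in the visible part of this diff. Given the ConfigProto and GPUOptions imports above, a minimal sketch of how such a session config could be built is shown below; the class name and the concrete option values (allow-growth, memory fraction) are illustrative assumptions, not taken from this commit.

import org.tensorflow.framework.ConfigProto;
import org.tensorflow.framework.GPUOptions;

// Illustrative sketch only: builds a serialized session ConfigProto with GPU
// options. The option values below are assumptions, not part of this commit.
class SessionConfigSketch {
    static byte[] gpuSessionConfig() {
        GPUOptions gpuOpts = GPUOptions.newBuilder()
                .setAllowGrowth(true)                // grab GPU memory on demand
                .setPerProcessGpuMemoryFraction(0.5) // cap at half of the GPU memory
                .build();
        ConfigProto config = ConfigProto.newBuilder()
                .setGpuOptions(gpuOpts)
                .build();
        // the serialized form is what new Session(g, config.toByteArray()) expects
        return config.toByteArray();
    }
}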
...
@@ -34,6 +35,10 @@ import jcuda.driver.CUdevice;
import jcuda.driver.JCudaDriver;
import jcuda.nvrtc.JNvrtc;
import jcuda.runtime.cudaPointerAttributes;
import jcuda.runtime.JCuda;
import jcuda.runtime.cudaError;
import java.nio.ByteBuffer;
import java.lang.reflect.Field;
...
@@ -108,14 +113,14 @@ public class tfhello{
        CUdeviceptr ptr1 = new CUdeviceptr();
        cuMemAlloc(ptr1, cuSize);
        System.out.println("CUdeviceptr ptr1 after cuMemAlloc: " + ptr1);
        // we will get pointer address as 'long' from JNI
        long ptr1_addr = getPointerAddress(ptr1);
        System.out.println("Extracted ptr1 address as (long): " + String.format("0x%08x", ptr1_addr));
        // notice, there's no cuMemAlloc
        CUdeviceptr ptr2 = longToCUdeviceptr(ptr1_addr);
        System.out.println("CUdeviceptr ptr2 created from ptr1's long address using java.reflection: " + ptr2);
        // test: copy a test array px_in to CUDA
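getPointerAddress() and longToCUdeviceptr() are helpers defined elsewhere in this file (hence the java.lang.reflect.Field import above). A minimal sketch of how such reflection-based helpers could look follows; it assumes the device address is stored in the private long field nativePointer of jcuda.NativePointerObject, which is a JCuda implementation detail and may differ between versions.

import java.lang.reflect.Field;
import jcuda.NativePointerObject;
import jcuda.driver.CUdeviceptr;

// Sketch of reflection-based pointer helpers. Assumption: the raw device
// address lives in the private long field 'nativePointer' declared by
// jcuda.NativePointerObject (the base class of CUdeviceptr).
class PointerReflectionSketch {

    // Extract the raw device address from an existing CUdeviceptr.
    static long getPointerAddress(CUdeviceptr ptr) throws Exception {
        Field f = NativePointerObject.class.getDeclaredField("nativePointer");
        f.setAccessible(true);
        return f.getLong(ptr);
    }

    // Wrap a raw device address (e.g. received as a long from JNI) in a new
    // CUdeviceptr without allocating any device memory.
    static CUdeviceptr longToCUdeviceptr(long addr) throws Exception {
        CUdeviceptr ptr = new CUdeviceptr();
        Field f = NativePointerObject.class.getDeclaredField("nativePointer");
        f.setAccessible(true);
        f.setLong(ptr, addr);
        return ptr;
    }
}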
...
@@ -143,10 +148,12 @@ public class tfhello{
        System.out.println("Test 3 start\n - Print TF version");
        System.out.println(TensorFlow.version());
        System.out.println("Test 3 end\n");
        System.out.println("Test 4 start\n - Test simple custom JNI function added to TF");
        System.out.println(TensorFlow.elphelVersion());
        System.out.println("Test 4 end\n");
        //callableOpts.newBuilder().putFeedDevices(key, value);
        try (Graph g = new Graph()) {
            final String value = "Hello from " + TensorFlow.version();
...
@@ -171,18 +178,64 @@ public class tfhello{
            System.out.println("Is CUDA tensor? " + String.valueOf(t.elphel_isCUDATensor()));
            System.out.println(t.elphelTestCUDAPointer());
            long handle1;
            //session.makeCallable(handle1);
            try (Session s = new Session(g, config.toByteArray());
            Session s = new Session(g, config.toByteArray())
                //s.runner().makeCallable("",handle1);
                //s.runner().runCallable(handle1);
                // Generally, there may be multiple output tensors,
                // all of them must be closed to prevent resource leaks.
                Tensor output = s.runner().fetch("array_tensor_out").feed("array_tensor_in", t).run().get(0);
            ){
            ){
                Tensor output = s.runner().fetch("array_tensor_out").feed("array_tensor_in", t).run().get(0);
                System.out.println(output.numBytes());
                int[] obuf = new int[output.numBytes()/Sizeof.INT];
                output.copyTo(obuf);
                System.out.println(Arrays.toString(obuf));
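                // --- Illustrative aside (not in this commit): the java.nio.ByteBuffer import
                // added above is unused in the visible hunks; in the TF 1.x Java API the raw
                // bytes of a tensor can also be read into a buffer, roughly like this:
                // ByteBuffer raw = ByteBuffer.allocate(output.numBytes());
                // output.writeTo(raw);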
                // natively get the GPU device name to insert into options
                // it's the same all the time
                String gpuDeviceName = s.elphelGPUDeviceName();
                // that's for RunCallable() if it ever gets implemented
                CallableOptions callableOpts = CallableOptions.newBuilder()
                        .addFetch("array_tensor_out:0")
                        .addFeed("array_tensor_in:0")
                        .putFeedDevices("array_tensor_in:0", gpuDeviceName)
                        .build();
                System.out.println(callableOpts);
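                // --- Illustrative aside (not in this commit): the Java bindings here expose no
                // MakeCallable/RunCallable, which is why those calls are commented out above.
                // If such a binding existed, the CallableOptions would be handed over in
                // serialized form, much like the session config; the method names below are
                // purely hypothetical placeholders.
                byte[] callableOptsBytes = callableOpts.toByteArray(); // standard protobuf serialization
                // long callableHandle = s.makeCallable(callableOptsBytes); // hypothetical
                // Tensor gpuOutput = s.runCallable(callableHandle, t);     // hypothetical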
                // GPU allocation:
                Tensor t3 = Tensor.elphelCreateGPUTensor(new long[]{256}, DataType.INT32);
                //System.out.println(t2.nativeRef);
                long t3_gpuptr = t3.elphel_GetGPUTensorPointer();
                System.out.println(String.format("0x%08x", t3_gpuptr));
                CUdeviceptr ptr3 = longToCUdeviceptr(t3_gpuptr);
                cudaPointerAttributes attrs = new cudaPointerAttributes();
                int res = JCuda.cudaPointerGetAttributes(attrs, ptr3);
                if (res == cudaError.cudaErrorInvalidValue) {
                    System.out.println("Invalid pointer value");
                }
                if (attrs.device == -1){
                    System.out.println("Not a CUDA device");
                }
                System.out.println("cuda pointer attributes?! " + res);
                System.out.println(attrs.toString());
                cuMemcpyHtoD(ptr3, Pointer.to(px_in), cuSize);
                cuMemcpyDtoH(Pointer.to(px_out), ptr3, cuSize);
                System.out.println(Arrays.toString(px_out));
                // check if it is a GPU pointer
            }
            /*
...
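The inline check above (the return code of JCuda.cudaPointerGetAttributes plus attrs.device) is what decides whether a raw address really refers to GPU memory. A small sketch that folds the same check into a reusable helper, using only the calls already present in the diff, could look like this; the class and method names are illustrative.

import jcuda.Pointer;
import jcuda.runtime.JCuda;
import jcuda.runtime.cudaError;
import jcuda.runtime.cudaPointerAttributes;

// Sketch only: reusable version of the GPU-pointer check used above.
class DevicePointerCheck {
    static boolean isDevicePointer(Pointer p) {
        cudaPointerAttributes attrs = new cudaPointerAttributes();
        int res = JCuda.cudaPointerGetAttributes(attrs, p);
        // cudaErrorInvalidValue: the runtime does not recognize this pointer;
        // device == -1: the pointer is not backed by a CUDA device.
        return res == cudaError.cudaSuccess && attrs.device != -1;
    }
}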